From a8d8f1becd8d062e50a37796c2c534251a58db4f Mon Sep 17 00:00:00 2001
From: jjfeing
Date: Sat, 30 Aug 2025 14:15:20 +0800
Subject: [PATCH 1/2] add namespace lite

---
 .../examples/cloud_infer/runtime_cpp/flags.h | 4 +- .../pass/pass_registry_tutorial.h | 4 +- .../infer/custom_add_infer.cc | 4 +- .../converter_extend/infer/custom_common.cc | 4 +- .../converter_extend/infer/custom_common.h | 4 +- .../node_parser/add_parser_tutorial.cc | 4 +- .../node_parser/add_parser_tutorial.h | 4 +- .../pass/pass_registry_tutorial.h | 4 +- .../runtime_extend/src/custom_add_infer.cc | 4 +- .../runtime_extend/src/custom_add_kernel.cc | 4 +- .../runtime_extend/src/custom_common.cc | 4 +- .../runtime_extend/src/custom_common.h | 4 +- .../src/custom_add_infer.cc | 4 +- .../src/custom_add_kernel_gpu.cc | 4 +- .../runtime_gpu_extend/src/custom_common.cc | 4 +- .../runtime_gpu_extend/src/custom_common.h | 4 +- .../include/api/multi_model_runner.h | 4 +- mindspore-lite/include/converter.h | 4 +- mindspore-lite/include/kernel_interface.h | 4 +- .../include/registry/converter_context.h | 4 +- .../include/registry/model_parser.h | 4 +- .../include/registry/model_parser_registry.h | 4 +- mindspore-lite/include/registry/node_parser.h | 4 +- .../include/registry/node_parser_registry.h | 4 +- mindspore-lite/include/registry/pass_base.h | 4 +- .../include/registry/pass_registry.h | 4 +- .../include/registry/register_kernel.h | 4 +- .../registry/register_kernel_interface.h | 4 +- mindspore-lite/include/train/metrics.h | 4 +- mindspore-lite/include/train/train_loop.h | 4 +- .../minddata/dataset/api/data_helper.cc | 4 +- .../minddata/dataset/api/datasets.cc | 4 +- .../minddata/dataset/api/execute.cc | 4 +- .../minddata/dataset/api/iterator.cc | 4 +- .../minddata/dataset/api/python/python_mp.h | 4 +- .../minddata/dataset/api/samplers.cc | 4 +- .../minddata/dataset/api/transforms.cc | 4 +- mindspore-lite/minddata/dataset/api/vision.cc | 4 +- .../dataset/callback/callback_manager.cc | 4 +- .../dataset/callback/callback_manager.h | 4 +- .../dataset/callback/callback_param.h | 4 +- .../minddata/dataset/callback/ds_callback.h | 4 +- .../dataset/callback/py_ds_callback.cc | 4 +- .../dataset/callback/py_ds_callback.h | 4 +- .../minddata/dataset/core/ascend_resource.cc | 4 +- .../minddata/dataset/core/ascend_resource.h | 4 +- .../minddata/dataset/core/client.cc | 4 +- mindspore-lite/minddata/dataset/core/client.h | 4 +- .../minddata/dataset/core/config_manager.cc | 4 +- .../minddata/dataset/core/config_manager.h | 4 +- .../minddata/dataset/core/cv_tensor.cc | 4 +- .../minddata/dataset/core/cv_tensor.h | 4 +- .../minddata/dataset/core/data_type.cc | 4 +- .../minddata/dataset/core/data_type.h | 4 +- .../minddata/dataset/core/de_tensor.cc | 4 +- .../minddata/dataset/core/de_tensor.h | 4 +- .../minddata/dataset/core/device_resource.cc | 4 +- .../minddata/dataset/core/device_resource.h | 4 +- .../minddata/dataset/core/device_tensor.cc | 4 +- .../minddata/dataset/core/device_tensor.h | 4 +- .../minddata/dataset/core/global_context.cc | 4 +- .../minddata/dataset/core/global_context.h | 4 +- .../minddata/dataset/core/tensor.cc | 4 +- mindspore-lite/minddata/dataset/core/tensor.h | 4 +- .../minddata/dataset/core/tensor_helpers.cc | 4 +- .../minddata/dataset/core/tensor_helpers.h | 4 +- .../minddata/dataset/core/tensor_row.cc | 4 +- .../minddata/dataset/core/tensor_row.h | 4 +- .../minddata/dataset/core/tensor_shape.cc | 4 +- .../minddata/dataset/core/tensor_shape.h | 4 +- .../minddata/dataset/core/type_id.h | 4 +-
mindspore-lite/minddata/dataset/core/types.cc | 4 +- .../minddata/dataset/engine/connector.h | 4 +- .../consumers/pull_based_tree_consumer.cc | 4 +- .../consumers/pull_based_tree_consumer.h | 4 +- .../dataset/engine/consumers/tree_consumer.cc | 4 +- .../dataset/engine/consumers/tree_consumer.h | 4 +- .../minddata/dataset/engine/data_schema.cc | 4 +- .../minddata/dataset/engine/data_schema.h | 4 +- .../dataset/engine/dataset_iterator.cc | 4 +- .../dataset/engine/dataset_iterator.h | 4 +- .../dataset/engine/datasetops/barrier_op.cc | 4 +- .../dataset/engine/datasetops/barrier_op.h | 4 +- .../dataset/engine/datasetops/batch_op.cc | 4 +- .../dataset/engine/datasetops/batch_op.h | 4 +- .../datasetops/bucket_batch_by_length_op.cc | 4 +- .../datasetops/bucket_batch_by_length_op.h | 4 +- .../build_sentence_piece_vocab_op.cc | 4 +- .../build_sentence_piece_vocab_op.h | 4 +- .../engine/datasetops/build_vocab_op.cc | 4 +- .../engine/datasetops/build_vocab_op.h | 4 +- .../engine/datasetops/cache_base_op.cc | 4 +- .../dataset/engine/datasetops/cache_base_op.h | 4 +- .../engine/datasetops/cache_lookup_op.cc | 4 +- .../engine/datasetops/cache_lookup_op.h | 4 +- .../engine/datasetops/cache_merge_op.cc | 4 +- .../engine/datasetops/cache_merge_op.h | 4 +- .../dataset/engine/datasetops/cache_op.cc | 4 +- .../dataset/engine/datasetops/cache_op.h | 4 +- .../dataset/engine/datasetops/concat_op.cc | 4 +- .../dataset/engine/datasetops/concat_op.h | 4 +- .../engine/datasetops/data_queue_op.cc | 4 +- .../dataset/engine/datasetops/data_queue_op.h | 4 +- .../dataset/engine/datasetops/dataset_op.cc | 4 +- .../dataset/engine/datasetops/dataset_op.h | 4 +- .../engine/datasetops/epoch_ctrl_op.cc | 4 +- .../dataset/engine/datasetops/epoch_ctrl_op.h | 4 +- .../dataset/engine/datasetops/filter_op.cc | 4 +- .../dataset/engine/datasetops/filter_op.h | 4 +- .../engine/datasetops/map_op/cpu_map_job.cc | 4 +- .../engine/datasetops/map_op/cpu_map_job.h | 4 +- .../engine/datasetops/map_op/gpu_map_job.cc | 4 +- .../engine/datasetops/map_op/gpu_map_job.h | 4 +- .../engine/datasetops/map_op/map_job.h | 4 +- .../engine/datasetops/map_op/map_op.cc | 4 +- .../dataset/engine/datasetops/map_op/map_op.h | 4 +- .../engine/datasetops/map_op/npu_map_job.cc | 4 +- .../engine/datasetops/map_op/npu_map_job.h | 4 +- .../dataset/engine/datasetops/parallel_op.h | 4 +- .../dataset/engine/datasetops/pipeline_op.cc | 4 +- .../dataset/engine/datasetops/pipeline_op.h | 4 +- .../dataset/engine/datasetops/project_op.cc | 4 +- .../dataset/engine/datasetops/project_op.h | 4 +- .../engine/datasetops/receive_bridge_op.cc | 4 +- .../engine/datasetops/receive_bridge_op.h | 4 +- .../dataset/engine/datasetops/rename_op.cc | 4 +- .../dataset/engine/datasetops/rename_op.h | 4 +- .../dataset/engine/datasetops/repeat_op.cc | 4 +- .../dataset/engine/datasetops/repeat_op.h | 4 +- .../engine/datasetops/send_bridge_op.cc | 4 +- .../engine/datasetops/send_bridge_op.h | 4 +- .../dataset/engine/datasetops/shuffle_op.cc | 4 +- .../dataset/engine/datasetops/shuffle_op.h | 4 +- .../dataset/engine/datasetops/skip_op.cc | 4 +- .../dataset/engine/datasetops/skip_op.h | 4 +- .../engine/datasetops/source/ag_news_op.cc | 110 +- .../engine/datasetops/source/ag_news_op.h | 154 +- .../engine/datasetops/source/album_op.cc | 4 +- .../engine/datasetops/source/album_op.h | 4 +- .../datasetops/source/amazon_review_op.cc | 100 +- .../datasetops/source/amazon_review_op.h | 142 +- .../engine/datasetops/source/caltech_op.cc | 64 +- .../engine/datasetops/source/caltech_op.h | 4 +- 
.../engine/datasetops/source/celeba_op.cc | 4 +- .../engine/datasetops/source/celeba_op.h | 4 +- .../engine/datasetops/source/cifar_op.cc | 4 +- .../engine/datasetops/source/cifar_op.h | 4 +- .../engine/datasetops/source/cityscapes_op.cc | 4 +- .../engine/datasetops/source/cityscapes_op.h | 4 +- .../engine/datasetops/source/clue_op.cc | 4 +- .../engine/datasetops/source/clue_op.h | 4 +- .../engine/datasetops/source/cmu_arctic_op.cc | 342 +- .../engine/datasetops/source/cmu_arctic_op.h | 198 +- .../engine/datasetops/source/coco_op.cc | 4 +- .../engine/datasetops/source/coco_op.h | 4 +- .../engine/datasetops/source/conll2000_op.cc | 4 +- .../engine/datasetops/source/conll2000_op.h | 4 +- .../engine/datasetops/source/csv_op.cc | 4 +- .../dataset/engine/datasetops/source/csv_op.h | 4 +- .../engine/datasetops/source/dbpedia_op.cc | 4 +- .../engine/datasetops/source/dbpedia_op.h | 4 +- .../engine/datasetops/source/div2k_op.cc | 4 +- .../engine/datasetops/source/div2k_op.h | 4 +- .../engine/datasetops/source/emnist_op.cc | 4 +- .../engine/datasetops/source/emnist_op.h | 4 +- .../engine/datasetops/source/en_wik9_op.cc | 4 +- .../engine/datasetops/source/en_wik9_op.h | 4 +- .../engine/datasetops/source/fake_image_op.cc | 4 +- .../engine/datasetops/source/fake_image_op.h | 4 +- .../datasetops/source/fashion_mnist_op.cc | 4 +- .../datasetops/source/fashion_mnist_op.h | 4 +- .../engine/datasetops/source/flickr_op.cc | 4 +- .../engine/datasetops/source/flickr_op.h | 4 +- .../engine/datasetops/source/food101_op.cc | 4 +- .../engine/datasetops/source/food101_op.h | 4 +- .../engine/datasetops/source/generator_op.cc | 4 +- .../engine/datasetops/source/generator_op.h | 4 +- .../engine/datasetops/source/gtzan_op.cc | 670 ++-- .../engine/datasetops/source/gtzan_op.h | 194 +- .../datasetops/source/image_folder_op.cc | 4 +- .../datasetops/source/image_folder_op.h | 4 +- .../engine/datasetops/source/imdb_op.cc | 4 +- .../engine/datasetops/source/imdb_op.h | 4 +- .../engine/datasetops/source/io_block.cc | 4 +- .../engine/datasetops/source/io_block.h | 4 +- .../engine/datasetops/source/iwslt_op.cc | 4 +- .../engine/datasetops/source/iwslt_op.h | 4 +- .../engine/datasetops/source/kitti_op.cc | 4 +- .../engine/datasetops/source/kitti_op.h | 4 +- .../engine/datasetops/source/kmnist_op.cc | 4 +- .../engine/datasetops/source/kmnist_op.h | 4 +- .../engine/datasetops/source/lfw_op.cc | 4 +- .../dataset/engine/datasetops/source/lfw_op.h | 4 +- .../engine/datasetops/source/libri_tts_op.cc | 468 +-- .../engine/datasetops/source/libri_tts_op.h | 240 +- .../engine/datasetops/source/lj_speech_op.cc | 4 +- .../engine/datasetops/source/lj_speech_op.h | 4 +- .../engine/datasetops/source/lsun_op.cc | 4 +- .../engine/datasetops/source/lsun_op.h | 4 +- .../engine/datasetops/source/manifest_op.cc | 4 +- .../engine/datasetops/source/manifest_op.h | 4 +- .../datasetops/source/mappable_leaf_op.cc | 4 +- .../datasetops/source/mappable_leaf_op.h | 4 +- .../engine/datasetops/source/mindrecord_op.cc | 4 +- .../engine/datasetops/source/mindrecord_op.h | 4 +- .../engine/datasetops/source/mnist_op.cc | 4 +- .../engine/datasetops/source/mnist_op.h | 4 +- .../engine/datasetops/source/multi30k_op.cc | 4 +- .../engine/datasetops/source/multi30k_op.h | 4 +- .../datasetops/source/nonmappable_leaf_op.cc | 4 +- .../datasetops/source/nonmappable_leaf_op.h | 4 +- .../engine/datasetops/source/omniglot_op.cc | 4 +- .../engine/datasetops/source/omniglot_op.h | 4 +- .../datasetops/source/penn_treebank_op.cc | 110 +- .../datasetops/source/penn_treebank_op.h 
| 138 +- .../engine/datasetops/source/photo_tour_op.cc | 4 +- .../engine/datasetops/source/photo_tour_op.h | 4 +- .../engine/datasetops/source/places365_op.cc | 4 +- .../engine/datasetops/source/places365_op.h | 4 +- .../engine/datasetops/source/qmnist_op.cc | 4 +- .../engine/datasetops/source/qmnist_op.h | 226 +- .../datasetops/source/random_data_op.cc | 4 +- .../engine/datasetops/source/random_data_op.h | 4 +- .../datasetops/source/rendered_sst2_op.cc | 4 +- .../datasetops/source/rendered_sst2_op.h | 4 +- .../source/sampler/distributed_sampler.cc | 4 +- .../source/sampler/distributed_sampler.h | 4 +- .../datasetops/source/sampler/pk_sampler.cc | 4 +- .../datasetops/source/sampler/pk_sampler.h | 4 +- .../source/sampler/random_sampler.cc | 4 +- .../source/sampler/random_sampler.h | 4 +- .../datasetops/source/sampler/sampler.cc | 4 +- .../datasetops/source/sampler/sampler.h | 4 +- .../source/sampler/sequential_sampler.cc | 4 +- .../source/sampler/sequential_sampler.h | 4 +- .../sampler/skip_first_epoch_sampler.cc | 4 +- .../source/sampler/skip_first_epoch_sampler.h | 4 +- .../source/sampler/subset_random_sampler.cc | 4 +- .../source/sampler/subset_random_sampler.h | 4 +- .../source/sampler/subset_sampler.cc | 4 +- .../source/sampler/subset_sampler.h | 4 +- .../source/sampler/weighted_random_sampler.cc | 4 +- .../source/sampler/weighted_random_sampler.h | 4 +- .../engine/datasetops/source/sbu_op.cc | 4 +- .../dataset/engine/datasetops/source/sbu_op.h | 4 +- .../engine/datasetops/source/semeion_op.cc | 4 +- .../engine/datasetops/source/semeion_op.h | 4 +- .../engine/datasetops/source/sogou_news_op.cc | 104 +- .../engine/datasetops/source/sogou_news_op.h | 142 +- .../datasetops/source/speech_commands_op.cc | 4 +- .../datasetops/source/speech_commands_op.h | 4 +- .../engine/datasetops/source/squad_op.cc | 4 +- .../engine/datasetops/source/squad_op.h | 4 +- .../engine/datasetops/source/sst2_op.cc | 4 +- .../engine/datasetops/source/sst2_op.h | 4 +- .../engine/datasetops/source/stl10_op.cc | 4 +- .../engine/datasetops/source/stl10_op.h | 4 +- .../engine/datasetops/source/sun397_op.cc | 4 +- .../engine/datasetops/source/sun397_op.h | 4 +- .../engine/datasetops/source/tedlium_op.cc | 626 ++-- .../engine/datasetops/source/tedlium_op.h | 252 +- .../engine/datasetops/source/text_file_op.cc | 4 +- .../engine/datasetops/source/text_file_op.h | 4 +- .../engine/datasetops/source/tf_reader_op.cc | 4 +- .../engine/datasetops/source/tf_reader_op.h | 4 +- .../engine/datasetops/source/udpos_op.cc | 4 +- .../engine/datasetops/source/udpos_op.h | 4 +- .../engine/datasetops/source/usps_op.cc | 4 +- .../engine/datasetops/source/usps_op.h | 274 +- .../engine/datasetops/source/voc_op.cc | 4 +- .../dataset/engine/datasetops/source/voc_op.h | 4 +- .../engine/datasetops/source/wider_face_op.cc | 4 +- .../engine/datasetops/source/wider_face_op.h | 4 +- .../engine/datasetops/source/wiki_text_op.cc | 4 +- .../engine/datasetops/source/wiki_text_op.h | 4 +- .../datasetops/source/yahoo_answers_op.cc | 4 +- .../datasetops/source/yahoo_answers_op.h | 4 +- .../datasetops/source/yelp_review_op.cc | 4 +- .../engine/datasetops/source/yelp_review_op.h | 4 +- .../engine/datasetops/source/yes_no_op.cc | 4 +- .../engine/datasetops/source/yes_no_op.h | 4 +- .../dataset/engine/datasetops/take_op.cc | 4 +- .../dataset/engine/datasetops/take_op.h | 4 +- .../dataset/engine/datasetops/zip_op.cc | 4 +- .../dataset/engine/datasetops/zip_op.h | 4 +- .../minddata/dataset/engine/execution_tree.cc | 4 +- 
.../minddata/dataset/engine/execution_tree.h | 4 +- .../dataset/engine/gpu_item_connector.h | 4 +- .../dataset/engine/ir/cache/dataset_cache.cc | 4 +- .../dataset/engine/ir/cache/dataset_cache.h | 4 +- .../engine/ir/cache/dataset_cache_impl.cc | 4 +- .../engine/ir/cache/dataset_cache_impl.h | 4 +- .../ir/cache/pre_built_dataset_cache.cc | 4 +- .../engine/ir/cache/pre_built_dataset_cache.h | 4 +- .../engine/ir/datasetops/batch_node.cc | 4 +- .../dataset/engine/ir/datasetops/batch_node.h | 4 +- .../datasetops/bucket_batch_by_length_node.cc | 4 +- .../datasetops/bucket_batch_by_length_node.h | 4 +- .../build_sentence_piece_vocab_node.cc | 4 +- .../build_sentence_piece_vocab_node.h | 4 +- .../engine/ir/datasetops/build_vocab_node.cc | 4 +- .../engine/ir/datasetops/build_vocab_node.h | 4 +- .../engine/ir/datasetops/cache_lookup_node.cc | 4 +- .../engine/ir/datasetops/cache_lookup_node.h | 4 +- .../engine/ir/datasetops/cache_merge_node.cc | 4 +- .../engine/ir/datasetops/cache_merge_node.h | 4 +- .../engine/ir/datasetops/cache_node.cc | 4 +- .../dataset/engine/ir/datasetops/cache_node.h | 4 +- .../engine/ir/datasetops/concat_node.cc | 4 +- .../engine/ir/datasetops/concat_node.h | 4 +- .../engine/ir/datasetops/data_queue_node.cc | 4 +- .../engine/ir/datasetops/data_queue_node.h | 4 +- .../engine/ir/datasetops/dataset_node.cc | 4 +- .../engine/ir/datasetops/dataset_node.h | 4 +- .../engine/ir/datasetops/epoch_ctrl_node.cc | 4 +- .../engine/ir/datasetops/epoch_ctrl_node.h | 4 +- .../engine/ir/datasetops/filter_node.cc | 4 +- .../engine/ir/datasetops/filter_node.h | 4 +- .../dataset/engine/ir/datasetops/map_node.cc | 4 +- .../dataset/engine/ir/datasetops/map_node.h | 4 +- .../engine/ir/datasetops/project_node.cc | 4 +- .../engine/ir/datasetops/project_node.h | 4 +- .../engine/ir/datasetops/rename_node.cc | 4 +- .../engine/ir/datasetops/rename_node.h | 4 +- .../engine/ir/datasetops/repeat_node.cc | 4 +- .../engine/ir/datasetops/repeat_node.h | 4 +- .../dataset/engine/ir/datasetops/root_node.cc | 4 +- .../dataset/engine/ir/datasetops/root_node.h | 4 +- .../engine/ir/datasetops/shuffle_node.cc | 4 +- .../engine/ir/datasetops/shuffle_node.h | 4 +- .../dataset/engine/ir/datasetops/skip_node.cc | 4 +- .../dataset/engine/ir/datasetops/skip_node.h | 4 +- .../ir/datasetops/source/ag_news_node.cc | 398 +-- .../ir/datasetops/source/ag_news_node.h | 252 +- .../engine/ir/datasetops/source/album_node.cc | 4 +- .../engine/ir/datasetops/source/album_node.h | 4 +- .../datasetops/source/amazon_review_node.cc | 390 +-- .../ir/datasetops/source/amazon_review_node.h | 240 +- .../ir/datasetops/source/caltech256_node.cc | 4 +- .../ir/datasetops/source/caltech256_node.h | 4 +- .../ir/datasetops/source/celeba_node.cc | 4 +- .../engine/ir/datasetops/source/celeba_node.h | 4 +- .../ir/datasetops/source/cifar100_node.cc | 4 +- .../ir/datasetops/source/cifar100_node.h | 4 +- .../ir/datasetops/source/cifar10_node.cc | 4 +- .../ir/datasetops/source/cifar10_node.h | 4 +- .../ir/datasetops/source/cityscapes_node.cc | 4 +- .../ir/datasetops/source/cityscapes_node.h | 4 +- .../engine/ir/datasetops/source/clue_node.cc | 4 +- .../engine/ir/datasetops/source/clue_node.h | 4 +- .../ir/datasetops/source/cmu_arctic_node.cc | 232 +- .../ir/datasetops/source/cmu_arctic_node.h | 190 +- .../engine/ir/datasetops/source/coco_node.cc | 4 +- .../engine/ir/datasetops/source/coco_node.h | 4 +- .../ir/datasetops/source/conll2000_node.cc | 4 +- .../ir/datasetops/source/conll2000_node.h | 4 +- .../engine/ir/datasetops/source/csv_node.cc | 4 +- 
.../engine/ir/datasetops/source/csv_node.h | 4 +- .../ir/datasetops/source/dbpedia_node.cc | 4 +- .../ir/datasetops/source/dbpedia_node.h | 4 +- .../engine/ir/datasetops/source/div2k_node.cc | 4 +- .../engine/ir/datasetops/source/div2k_node.h | 4 +- .../ir/datasetops/source/emnist_node.cc | 4 +- .../engine/ir/datasetops/source/emnist_node.h | 4 +- .../ir/datasetops/source/en_wik9_node.cc | 4 +- .../ir/datasetops/source/en_wik9_node.h | 4 +- .../ir/datasetops/source/fake_image_node.cc | 4 +- .../ir/datasetops/source/fake_image_node.h | 4 +- .../datasetops/source/fashion_mnist_node.cc | 4 +- .../ir/datasetops/source/fashion_mnist_node.h | 4 +- .../ir/datasetops/source/flickr_node.cc | 4 +- .../engine/ir/datasetops/source/flickr_node.h | 4 +- .../ir/datasetops/source/food101_node.cc | 4 +- .../ir/datasetops/source/food101_node.h | 4 +- .../ir/datasetops/source/generator_node.cc | 4 +- .../ir/datasetops/source/generator_node.h | 4 +- .../engine/ir/datasetops/source/gtzan_node.cc | 220 +- .../engine/ir/datasetops/source/gtzan_node.h | 190 +- .../ir/datasetops/source/image_folder_node.cc | 4 +- .../ir/datasetops/source/image_folder_node.h | 4 +- .../engine/ir/datasetops/source/imdb_node.cc | 4 +- .../engine/ir/datasetops/source/imdb_node.h | 4 +- .../ir/datasetops/source/iwslt2016_node.cc | 4 +- .../ir/datasetops/source/iwslt2016_node.h | 4 +- .../ir/datasetops/source/iwslt2017_node.cc | 4 +- .../ir/datasetops/source/iwslt2017_node.h | 4 +- .../engine/ir/datasetops/source/kitti_node.cc | 4 +- .../engine/ir/datasetops/source/kitti_node.h | 4 +- .../ir/datasetops/source/kmnist_node.cc | 4 +- .../engine/ir/datasetops/source/kmnist_node.h | 4 +- .../engine/ir/datasetops/source/lfw_node.cc | 4 +- .../engine/ir/datasetops/source/lfw_node.h | 4 +- .../ir/datasetops/source/libri_tts_node.cc | 242 +- .../ir/datasetops/source/libri_tts_node.h | 190 +- .../ir/datasetops/source/lj_speech_node.cc | 4 +- .../ir/datasetops/source/lj_speech_node.h | 4 +- .../engine/ir/datasetops/source/lsun_node.cc | 4 +- .../engine/ir/datasetops/source/lsun_node.h | 4 +- .../ir/datasetops/source/manifest_node.cc | 4 +- .../ir/datasetops/source/manifest_node.h | 4 +- .../ir/datasetops/source/minddata_node.cc | 4 +- .../ir/datasetops/source/minddata_node.h | 4 +- .../engine/ir/datasetops/source/mnist_node.cc | 4 +- .../engine/ir/datasetops/source/mnist_node.h | 4 +- .../ir/datasetops/source/multi30k_node.cc | 4 +- .../ir/datasetops/source/multi30k_node.h | 4 +- .../ir/datasetops/source/omniglot_node.cc | 4 +- .../ir/datasetops/source/omniglot_node.h | 4 +- .../datasetops/source/penn_treebank_node.cc | 400 +-- .../ir/datasetops/source/penn_treebank_node.h | 248 +- .../ir/datasetops/source/photo_tour_node.cc | 4 +- .../ir/datasetops/source/photo_tour_node.h | 4 +- .../ir/datasetops/source/places365_node.cc | 4 +- .../ir/datasetops/source/places365_node.h | 4 +- .../ir/datasetops/source/qmnist_node.cc | 308 +- .../engine/ir/datasetops/source/qmnist_node.h | 218 +- .../ir/datasetops/source/random_node.cc | 4 +- .../engine/ir/datasetops/source/random_node.h | 4 +- .../datasetops/source/rendered_sst2_node.cc | 4 +- .../ir/datasetops/source/rendered_sst2_node.h | 4 +- .../source/samplers/distributed_sampler_ir.cc | 4 +- .../source/samplers/distributed_sampler_ir.h | 4 +- .../source/samplers/pk_sampler_ir.cc | 4 +- .../source/samplers/pk_sampler_ir.h | 4 +- .../source/samplers/prebuilt_sampler_ir.cc | 4 +- .../source/samplers/prebuilt_sampler_ir.h | 4 +- .../source/samplers/random_sampler_ir.cc | 4 +- 
.../source/samplers/random_sampler_ir.h | 4 +- .../datasetops/source/samplers/samplers_ir.cc | 4 +- .../datasetops/source/samplers/samplers_ir.h | 4 +- .../source/samplers/sequential_sampler_ir.cc | 4 +- .../source/samplers/sequential_sampler_ir.h | 4 +- .../samplers/skip_first_epoch_sampler_ir.cc | 4 +- .../samplers/skip_first_epoch_sampler_ir.h | 4 +- .../samplers/subset_random_sampler_ir.cc | 4 +- .../samplers/subset_random_sampler_ir.h | 4 +- .../source/samplers/subset_sampler_ir.cc | 4 +- .../source/samplers/subset_sampler_ir.h | 4 +- .../samplers/weighted_random_sampler_ir.cc | 4 +- .../samplers/weighted_random_sampler_ir.h | 4 +- .../engine/ir/datasetops/source/sbu_node.cc | 250 +- .../engine/ir/datasetops/source/sbu_node.h | 190 +- .../ir/datasetops/source/semeion_node.cc | 4 +- .../ir/datasetops/source/semeion_node.h | 4 +- .../ir/datasetops/source/sogou_news_node.cc | 392 +-- .../ir/datasetops/source/sogou_news_node.h | 270 +- .../datasetops/source/speech_commands_node.cc | 4 +- .../datasetops/source/speech_commands_node.h | 4 +- .../engine/ir/datasetops/source/squad_node.cc | 4 +- .../engine/ir/datasetops/source/squad_node.h | 4 +- .../engine/ir/datasetops/source/sst2_node.cc | 4 +- .../engine/ir/datasetops/source/sst2_node.h | 4 +- .../engine/ir/datasetops/source/stl10_node.cc | 4 +- .../engine/ir/datasetops/source/stl10_node.h | 4 +- .../ir/datasetops/source/sun397_node.cc | 4 +- .../engine/ir/datasetops/source/sun397_node.h | 4 +- .../ir/datasetops/source/tedlium_node.cc | 4 +- .../ir/datasetops/source/tedlium_node.h | 4 +- .../ir/datasetops/source/text_file_node.cc | 4 +- .../ir/datasetops/source/text_file_node.h | 4 +- .../ir/datasetops/source/tf_record_node.cc | 4 +- .../ir/datasetops/source/tf_record_node.h | 4 +- .../engine/ir/datasetops/source/udpos_node.cc | 4 +- .../engine/ir/datasetops/source/udpos_node.h | 4 +- .../engine/ir/datasetops/source/usps_node.cc | 340 +- .../engine/ir/datasetops/source/usps_node.h | 240 +- .../engine/ir/datasetops/source/voc_node.cc | 4 +- .../engine/ir/datasetops/source/voc_node.h | 4 +- .../ir/datasetops/source/wider_face_node.cc | 4 +- .../ir/datasetops/source/wider_face_node.h | 4 +- .../ir/datasetops/source/wiki_text_node.cc | 4 +- .../ir/datasetops/source/wiki_text_node.h | 4 +- .../datasetops/source/yahoo_answers_node.cc | 4 +- .../ir/datasetops/source/yahoo_answers_node.h | 4 +- .../ir/datasetops/source/yelp_review_node.cc | 4 +- .../ir/datasetops/source/yelp_review_node.h | 4 +- .../ir/datasetops/source/yes_no_node.cc | 4 +- .../engine/ir/datasetops/source/yes_no_node.h | 4 +- .../engine/ir/datasetops/sync_wait_node.cc | 4 +- .../engine/ir/datasetops/sync_wait_node.h | 4 +- .../dataset/engine/ir/datasetops/take_node.cc | 4 +- .../dataset/engine/ir/datasetops/take_node.h | 4 +- .../dataset/engine/ir/datasetops/zip_node.cc | 4 +- .../dataset/engine/ir/datasetops/zip_node.h | 4 +- .../dataset/engine/jagged_connector.h | 4 +- .../dataset/engine/operator_connector.h | 4 +- .../minddata/dataset/engine/opt/pass.cc | 4 +- .../minddata/dataset/engine/opt/pass.h | 4 +- .../engine/opt/post/auto_worker_pass.cc | 4 +- .../engine/opt/post/auto_worker_pass.h | 4 +- .../dataset/engine/opt/pre/add_skip_pass.cc | 4 +- .../dataset/engine/opt/pre/add_skip_pass.h | 4 +- .../engine/opt/pre/cache_validation_pass.cc | 4 +- .../engine/opt/pre/cache_validation_pass.h | 4 +- .../dataset/engine/opt/pre/debug_mode_pass.cc | 4 +- .../dataset/engine/opt/pre/debug_mode_pass.h | 4 +- .../dataset/engine/opt/pre/deep_copy_pass.cc | 4 +- 
.../dataset/engine/opt/pre/deep_copy_pass.h | 4 +- .../dataset/engine/opt/pre/epoch_ctrl_pass.cc | 4 +- .../dataset/engine/opt/pre/epoch_ctrl_pass.h | 4 +- .../dataset/engine/opt/pre/getter_pass.cc | 4 +- .../dataset/engine/opt/pre/getter_pass.h | 4 +- .../engine/opt/pre/input_validation_pass.cc | 4 +- .../engine/opt/pre/input_validation_pass.h | 4 +- .../dataset/engine/opt/pre/insert_map_pass.cc | 4 +- .../dataset/engine/opt/pre/insert_map_pass.h | 4 +- .../engine/opt/pre/node_removal_pass.cc | 4 +- .../engine/opt/pre/node_removal_pass.h | 4 +- .../engine/opt/pre/skip_pushdown_pass.cc | 4 +- .../engine/opt/pre/skip_pushdown_pass.h | 4 +- .../minddata/dataset/engine/perf/auto_tune.cc | 4 +- .../minddata/dataset/engine/perf/auto_tune.h | 4 +- .../dataset/engine/perf/connector_size.cc | 4 +- .../dataset/engine/perf/connector_size.h | 4 +- .../dataset/engine/perf/cpu_sampler.cc | 4 +- .../dataset/engine/perf/cpu_sampler.h | 4 +- .../dataset/engine/perf/cyclic_array.h | 4 +- .../engine/perf/dataset_iterator_tracing.cc | 4 +- .../engine/perf/dataset_iterator_tracing.h | 4 +- .../engine/perf/device_queue_tracing.cc | 4 +- .../engine/perf/device_queue_tracing.h | 4 +- .../dataset/engine/perf/info_collector.cc | 4 +- .../dataset/engine/perf/info_collector.h | 4 +- .../minddata/dataset/engine/perf/monitor.cc | 4 +- .../minddata/dataset/engine/perf/monitor.h | 4 +- .../minddata/dataset/engine/perf/perf_data.h | 4 +- .../minddata/dataset/engine/perf/profiling.cc | 4 +- .../minddata/dataset/engine/perf/profiling.h | 4 +- .../dataset/engine/python_runtime_context.cc | 4 +- .../dataset/engine/python_runtime_context.h | 4 +- .../dataset/engine/runtime_context.cc | 4 +- .../minddata/dataset/engine/runtime_context.h | 4 +- .../minddata/dataset/engine/serdes.cc | 4 +- .../minddata/dataset/engine/serdes.h | 4 +- .../minddata/dataset/engine/tree_adapter.cc | 4 +- .../minddata/dataset/engine/tree_adapter.h | 4 +- .../dataset/engine/tree_adapter_lite.cc | 4 +- .../dataset/engine/tree_adapter_lite.h | 4 +- .../minddata/dataset/engine/tree_modifier.cc | 4 +- .../minddata/dataset/engine/tree_modifier.h | 4 +- .../minddata/dataset/include/dataset/audio.h | 2874 ++++++++--------- .../minddata/dataset/include/dataset/config.h | 4 +- .../dataset/include/dataset/constants.h | 4 +- .../dataset/include/dataset/data_helper.h | 4 +- .../dataset/include/dataset/datasets.h | 4 +- .../dataset/include/dataset/execute.h | 4 +- .../dataset/include/dataset/iterator.h | 4 +- .../dataset/include/dataset/samplers.h | 4 +- .../minddata/dataset/include/dataset/text.h | 4 +- .../dataset/include/dataset/transforms.h | 4 +- .../minddata/dataset/include/dataset/vision.h | 4 +- .../dataset/include/dataset/vision_ascend.h | 4 +- .../dataset/include/dataset/vision_lite.h | 4 +- .../minddata/dataset/kernels/c_func_op.cc | 4 +- .../minddata/dataset/kernels/c_func_op.h | 4 +- .../dataset/kernels/data/compose_op.cc | 4 +- .../dataset/kernels/data/compose_op.h | 4 +- .../dataset/kernels/data/concatenate_op.cc | 4 +- .../dataset/kernels/data/concatenate_op.h | 4 +- .../dataset/kernels/data/data_utils.cc | 4 +- .../dataset/kernels/data/data_utils.h | 4 +- .../dataset/kernels/data/duplicate_op.cc | 4 +- .../dataset/kernels/data/duplicate_op.h | 4 +- .../minddata/dataset/kernels/data/fill_op.cc | 4 +- .../minddata/dataset/kernels/data/fill_op.h | 4 +- .../minddata/dataset/kernels/data/mask_op.cc | 4 +- .../minddata/dataset/kernels/data/mask_op.h | 4 +- .../minddata/dataset/kernels/data/no_op.h | 4 +- .../dataset/kernels/data/one_hot_op.cc | 4 +- 
.../dataset/kernels/data/one_hot_op.h | 4 +- .../dataset/kernels/data/pad_end_op.cc | 4 +- .../dataset/kernels/data/pad_end_op.h | 4 +- .../dataset/kernels/data/parse_example_op.cc | 4 +- .../dataset/kernels/data/parse_example_op.h | 4 +- .../dataset/kernels/data/random_apply_op.cc | 4 +- .../dataset/kernels/data/random_apply_op.h | 4 +- .../dataset/kernels/data/random_choice_op.cc | 4 +- .../dataset/kernels/data/random_choice_op.h | 4 +- .../minddata/dataset/kernels/data/slice_op.cc | 4 +- .../minddata/dataset/kernels/data/slice_op.h | 4 +- .../dataset/kernels/data/to_float16_op.cc | 4 +- .../dataset/kernels/data/to_float16_op.h | 4 +- .../dataset/kernels/data/type_cast_op.cc | 4 +- .../dataset/kernels/data/type_cast_op.h | 4 +- .../dataset/kernels/data/unique_op.cc | 4 +- .../minddata/dataset/kernels/data/unique_op.h | 4 +- .../kernels/image/adjust_brightness_op.cc | 4 +- .../kernels/image/adjust_brightness_op.h | 4 +- .../kernels/image/adjust_contrast_op.cc | 4 +- .../kernels/image/adjust_contrast_op.h | 4 +- .../dataset/kernels/image/adjust_gamma_op.cc | 4 +- .../dataset/kernels/image/adjust_gamma_op.h | 4 +- .../dataset/kernels/image/adjust_hue_op.cc | 4 +- .../dataset/kernels/image/adjust_hue_op.h | 4 +- .../kernels/image/adjust_saturation_op.cc | 4 +- .../kernels/image/adjust_saturation_op.h | 4 +- .../dataset/kernels/image/affine_op.cc | 4 +- .../dataset/kernels/image/affine_op.h | 4 +- .../dataset/kernels/image/auto_augment_op.cc | 4 +- .../dataset/kernels/image/auto_augment_op.h | 4 +- .../dataset/kernels/image/auto_contrast_op.cc | 4 +- .../dataset/kernels/image/auto_contrast_op.h | 4 +- .../dataset/kernels/image/bounding_box.cc | 4 +- .../dataset/kernels/image/bounding_box.h | 4 +- .../kernels/image/bounding_box_augment_op.cc | 4 +- .../kernels/image/bounding_box_augment_op.h | 4 +- .../dataset/kernels/image/center_crop_op.cc | 4 +- .../dataset/kernels/image/center_crop_op.h | 4 +- .../dataset/kernels/image/convert_color_op.cc | 4 +- .../dataset/kernels/image/convert_color_op.h | 4 +- .../minddata/dataset/kernels/image/crop_op.cc | 4 +- .../minddata/dataset/kernels/image/crop_op.h | 4 +- .../dataset/kernels/image/cut_out_op.cc | 4 +- .../dataset/kernels/image/cut_out_op.h | 4 +- .../dataset/kernels/image/cutmix_batch_op.cc | 4 +- .../dataset/kernels/image/cutmix_batch_op.h | 4 +- .../dataset/kernels/image/decode_op.cc | 4 +- .../dataset/kernels/image/decode_op.h | 4 +- .../dataset/kernels/image/decode_video_op.cc | 4 +- .../dataset/kernels/image/decode_video_op.h | 4 +- .../dataset/kernels/image/dvpp/acl_adapter.cc | 4 +- .../dataset/kernels/image/dvpp/acl_adapter.h | 4 +- .../image/dvpp/ascend310/dvpp_crop_jpeg_op.cc | 4 +- .../image/dvpp/ascend310/dvpp_crop_jpeg_op.h | 4 +- .../dvpp/ascend310/dvpp_decode_jpeg_op.cc | 4 +- .../dvpp/ascend310/dvpp_decode_jpeg_op.h | 4 +- .../dvpp/ascend310/dvpp_decode_png_op.cc | 4 +- .../image/dvpp/ascend310/dvpp_decode_png_op.h | 4 +- .../dvpp_decode_resize_crop_jpeg_op.cc | 4 +- .../dvpp_decode_resize_crop_jpeg_op.h | 4 +- .../ascend310/dvpp_decode_resize_jpeg_op.cc | 4 +- .../ascend310/dvpp_decode_resize_jpeg_op.h | 4 +- .../dvpp/ascend310/dvpp_decode_video_op.cc | 4 +- .../dvpp/ascend310/dvpp_decode_video_op.h | 4 +- .../image/dvpp/ascend310/dvpp_normalize_op.cc | 4 +- .../image/dvpp/ascend310/dvpp_normalize_op.h | 4 +- .../dvpp/ascend310/dvpp_resize_jpeg_op.cc | 4 +- .../dvpp/ascend310/dvpp_resize_jpeg_op.h | 4 +- .../kernels/image/dvpp/utils/acl_env_guard.cc | 4 +- .../kernels/image/dvpp/utils/acl_env_guard.h | 4 +- 
.../image/dvpp/utils/dvpp_image_utils.cc | 4 +- .../image/dvpp/utils/dvpp_image_utils.h | 4 +- .../dataset/kernels/image/equalize_op.cc | 4 +- .../dataset/kernels/image/equalize_op.h | 4 +- .../dataset/kernels/image/erase_op.cc | 4 +- .../minddata/dataset/kernels/image/erase_op.h | 4 +- .../dataset/kernels/image/exif_utils.cc | 4 +- .../dataset/kernels/image/exif_utils.h | 4 +- .../dataset/kernels/image/gaussian_blur_op.cc | 4 +- .../dataset/kernels/image/gaussian_blur_op.h | 4 +- .../kernels/image/horizontal_flip_op.cc | 4 +- .../kernels/image/horizontal_flip_op.h | 4 +- .../dataset/kernels/image/hwc_to_chw_op.cc | 4 +- .../dataset/kernels/image/hwc_to_chw_op.h | 4 +- .../dataset/kernels/image/image_utils.cc | 4 +- .../dataset/kernels/image/image_utils.h | 4 +- .../dataset/kernels/image/invert_op.cc | 4 +- .../dataset/kernels/image/invert_op.h | 4 +- .../dataset/kernels/image/lite_cv/canny.cc | 4 +- .../kernels/image/lite_cv/gaussian_blur.cc | 4 +- .../kernels/image/lite_cv/image_process.cc | 4 +- .../kernels/image/lite_cv/image_process.h | 4 +- .../dataset/kernels/image/lite_cv/lite_mat.cc | 4 +- .../dataset/kernels/image/lite_cv/lite_mat.h | 4 +- .../kernels/image/lite_cv/warp_affine.cc | 4 +- .../dataset/kernels/image/lite_image_utils.cc | 4 +- .../dataset/kernels/image/lite_image_utils.h | 4 +- .../dataset/kernels/image/math_utils.cc | 4 +- .../dataset/kernels/image/math_utils.h | 4 +- .../dataset/kernels/image/mixup_batch_op.cc | 4 +- .../dataset/kernels/image/mixup_batch_op.h | 4 +- .../dataset/kernels/image/normalize_op.cc | 4 +- .../dataset/kernels/image/normalize_op.h | 4 +- .../dataset/kernels/image/normalize_pad_op.cc | 4 +- .../dataset/kernels/image/normalize_pad_op.h | 4 +- .../minddata/dataset/kernels/image/pad_op.cc | 4 +- .../minddata/dataset/kernels/image/pad_op.h | 4 +- .../dataset/kernels/image/pad_to_size_op.cc | 4 +- .../dataset/kernels/image/pad_to_size_op.h | 4 +- .../dataset/kernels/image/perspective_op.cc | 4 +- .../dataset/kernels/image/perspective_op.h | 4 +- .../dataset/kernels/image/posterize_op.cc | 4 +- .../dataset/kernels/image/posterize_op.h | 4 +- .../dataset/kernels/image/rand_augment_op.cc | 4 +- .../dataset/kernels/image/rand_augment_op.h | 4 +- .../image/random_adjust_sharpness_op.cc | 4 +- .../image/random_adjust_sharpness_op.h | 4 +- .../dataset/kernels/image/random_affine_op.cc | 4 +- .../dataset/kernels/image/random_affine_op.h | 4 +- .../kernels/image/random_auto_contrast_op.cc | 4 +- .../kernels/image/random_auto_contrast_op.h | 4 +- .../kernels/image/random_color_adjust_op.cc | 4 +- .../kernels/image/random_color_adjust_op.h | 4 +- .../dataset/kernels/image/random_color_op.cc | 4 +- .../dataset/kernels/image/random_color_op.h | 4 +- .../image/random_crop_and_resize_op.cc | 4 +- .../kernels/image/random_crop_and_resize_op.h | 4 +- .../random_crop_and_resize_with_bbox_op.cc | 4 +- .../random_crop_and_resize_with_bbox_op.h | 4 +- .../image/random_crop_decode_resize_op.cc | 4 +- .../image/random_crop_decode_resize_op.h | 4 +- .../dataset/kernels/image/random_crop_op.cc | 4 +- .../dataset/kernels/image/random_crop_op.h | 4 +- .../kernels/image/random_crop_with_bbox_op.cc | 4 +- .../kernels/image/random_crop_with_bbox_op.h | 4 +- .../kernels/image/random_equalize_op.cc | 4 +- .../kernels/image/random_equalize_op.h | 4 +- .../image/random_horizontal_flip_op.cc | 4 +- .../kernels/image/random_horizontal_flip_op.h | 4 +- .../random_horizontal_flip_with_bbox_op.cc | 4 +- .../random_horizontal_flip_with_bbox_op.h | 4 +- 
.../dataset/kernels/image/random_invert_op.cc | 4 +- .../dataset/kernels/image/random_invert_op.h | 4 +- .../kernels/image/random_lighting_op.cc | 4 +- .../kernels/image/random_lighting_op.h | 4 +- .../kernels/image/random_posterize_op.cc | 4 +- .../kernels/image/random_posterize_op.h | 4 +- .../dataset/kernels/image/random_resize_op.cc | 4 +- .../dataset/kernels/image/random_resize_op.h | 4 +- .../image/random_resize_with_bbox_op.cc | 4 +- .../image/random_resize_with_bbox_op.h | 4 +- .../kernels/image/random_rotation_op.cc | 4 +- .../kernels/image/random_rotation_op.h | 4 +- .../image/random_select_subpolicy_op.cc | 4 +- .../image/random_select_subpolicy_op.h | 4 +- .../kernels/image/random_sharpness_op.cc | 4 +- .../kernels/image/random_sharpness_op.h | 4 +- .../kernels/image/random_solarize_op.cc | 4 +- .../kernels/image/random_solarize_op.h | 4 +- .../kernels/image/random_vertical_flip_op.cc | 4 +- .../kernels/image/random_vertical_flip_op.h | 4 +- .../random_vertical_flip_with_bbox_op.cc | 4 +- .../image/random_vertical_flip_with_bbox_op.h | 4 +- .../dataset/kernels/image/rescale_op.cc | 4 +- .../dataset/kernels/image/rescale_op.h | 4 +- .../kernels/image/resize_bilinear_op.h | 4 +- .../dataset/kernels/image/resize_cubic_op.cc | 4 +- .../dataset/kernels/image/resize_cubic_op.h | 4 +- .../dataset/kernels/image/resize_op.cc | 4 +- .../dataset/kernels/image/resize_op.h | 4 +- .../kernels/image/resize_preserve_ar_op.cc | 4 +- .../kernels/image/resize_preserve_ar_op.h | 4 +- .../kernels/image/resize_with_bbox_op.cc | 4 +- .../kernels/image/resize_with_bbox_op.h | 4 +- .../dataset/kernels/image/resized_crop_op.cc | 4 +- .../dataset/kernels/image/resized_crop_op.h | 4 +- .../dataset/kernels/image/rgb_to_bgr_op.cc | 4 +- .../dataset/kernels/image/rgb_to_bgr_op.h | 4 +- .../dataset/kernels/image/rgb_to_gray_op.cc | 4 +- .../dataset/kernels/image/rgb_to_gray_op.h | 4 +- .../dataset/kernels/image/rgba_to_bgr_op.cc | 4 +- .../dataset/kernels/image/rgba_to_bgr_op.h | 4 +- .../dataset/kernels/image/rgba_to_rgb_op.cc | 4 +- .../dataset/kernels/image/rgba_to_rgb_op.h | 4 +- .../dataset/kernels/image/rotate_op.cc | 4 +- .../dataset/kernels/image/rotate_op.h | 4 +- .../dataset/kernels/image/sharpness_op.cc | 4 +- .../dataset/kernels/image/sharpness_op.h | 4 +- .../dataset/kernels/image/slice_patches_op.cc | 4 +- .../dataset/kernels/image/slice_patches_op.h | 4 +- .../dataset/kernels/image/solarize_op.cc | 4 +- .../dataset/kernels/image/solarize_op.h | 4 +- .../dataset/kernels/image/swap_red_blue_op.cc | 4 +- .../dataset/kernels/image/swap_red_blue_op.h | 4 +- .../dataset/kernels/image/to_tensor_op.cc | 4 +- .../dataset/kernels/image/to_tensor_op.h | 4 +- .../kernels/image/trivial_augment_wide_op.cc | 4 +- .../kernels/image/trivial_augment_wide_op.h | 4 +- .../dataset/kernels/image/uniform_aug_op.cc | 4 +- .../dataset/kernels/image/uniform_aug_op.h | 4 +- .../dataset/kernels/image/vertical_flip_op.cc | 4 +- .../dataset/kernels/image/vertical_flip_op.h | 4 +- .../dataset/kernels/image/video_utils.cc | 4 +- .../dataset/kernels/image/video_utils.h | 4 +- .../dataset/kernels/ir/data/transforms_ir.cc | 4 +- .../dataset/kernels/ir/data/transforms_ir.h | 4 +- .../dataset/kernels/ir/tensor_operation.h | 4 +- .../dataset/kernels/ir/transforms_ir.cc | 4 +- .../minddata/dataset/kernels/ir/validators.cc | 4 +- .../minddata/dataset/kernels/ir/validators.h | 4 +- .../kernels/ir/vision/adjust_brightness_ir.cc | 4 +- .../kernels/ir/vision/adjust_brightness_ir.h | 4 +- .../kernels/ir/vision/adjust_contrast_ir.cc | 4 +- 
.../kernels/ir/vision/adjust_contrast_ir.h | 4 +- .../kernels/ir/vision/adjust_gamma_ir.cc | 4 +- .../kernels/ir/vision/adjust_gamma_ir.h | 4 +- .../kernels/ir/vision/adjust_hue_ir.cc | 4 +- .../dataset/kernels/ir/vision/adjust_hue_ir.h | 4 +- .../kernels/ir/vision/adjust_saturation_ir.cc | 4 +- .../kernels/ir/vision/adjust_saturation_ir.h | 4 +- .../kernels/ir/vision/adjust_sharpness_ir.cc | 4 +- .../kernels/ir/vision/adjust_sharpness_ir.h | 4 +- .../dataset/kernels/ir/vision/affine_ir.cc | 4 +- .../dataset/kernels/ir/vision/affine_ir.h | 4 +- .../kernels/ir/vision/ascend_vision_ir.cc | 4 +- .../kernels/ir/vision/ascend_vision_ir.h | 4 +- .../kernels/ir/vision/auto_augment_ir.cc | 4 +- .../kernels/ir/vision/auto_augment_ir.h | 4 +- .../kernels/ir/vision/auto_contrast_ir.cc | 4 +- .../kernels/ir/vision/auto_contrast_ir.h | 4 +- .../ir/vision/bounding_box_augment_ir.cc | 4 +- .../ir/vision/bounding_box_augment_ir.h | 4 +- .../kernels/ir/vision/center_crop_ir.cc | 4 +- .../kernels/ir/vision/center_crop_ir.h | 4 +- .../kernels/ir/vision/convert_color_ir.cc | 4 +- .../kernels/ir/vision/convert_color_ir.h | 4 +- .../dataset/kernels/ir/vision/crop_ir.cc | 4 +- .../dataset/kernels/ir/vision/crop_ir.h | 4 +- .../kernels/ir/vision/cutmix_batch_ir.cc | 4 +- .../kernels/ir/vision/cutmix_batch_ir.h | 4 +- .../dataset/kernels/ir/vision/cutout_ir.cc | 4 +- .../dataset/kernels/ir/vision/cutout_ir.h | 4 +- .../dataset/kernels/ir/vision/decode_ir.cc | 4 +- .../dataset/kernels/ir/vision/decode_ir.h | 4 +- .../kernels/ir/vision/decode_video_ir.cc | 4 +- .../kernels/ir/vision/decode_video_ir.h | 4 +- .../dataset/kernels/ir/vision/equalize_ir.cc | 4 +- .../dataset/kernels/ir/vision/equalize_ir.h | 4 +- .../dataset/kernels/ir/vision/erase_ir.cc | 4 +- .../dataset/kernels/ir/vision/erase_ir.h | 4 +- .../kernels/ir/vision/gaussian_blur_ir.cc | 4 +- .../kernels/ir/vision/gaussian_blur_ir.h | 4 +- .../kernels/ir/vision/horizontal_flip_ir.cc | 4 +- .../kernels/ir/vision/horizontal_flip_ir.h | 4 +- .../kernels/ir/vision/hwc_to_chw_ir.cc | 4 +- .../dataset/kernels/ir/vision/hwc_to_chw_ir.h | 4 +- .../dataset/kernels/ir/vision/invert_ir.cc | 4 +- .../dataset/kernels/ir/vision/invert_ir.h | 4 +- .../kernels/ir/vision/mixup_batch_ir.cc | 4 +- .../kernels/ir/vision/mixup_batch_ir.h | 4 +- .../dataset/kernels/ir/vision/normalize_ir.cc | 4 +- .../dataset/kernels/ir/vision/normalize_ir.h | 4 +- .../kernels/ir/vision/normalize_pad_ir.cc | 4 +- .../kernels/ir/vision/normalize_pad_ir.h | 4 +- .../dataset/kernels/ir/vision/pad_ir.cc | 4 +- .../dataset/kernels/ir/vision/pad_ir.h | 4 +- .../kernels/ir/vision/pad_to_size_ir.cc | 4 +- .../kernels/ir/vision/pad_to_size_ir.h | 4 +- .../kernels/ir/vision/perspective_ir.cc | 4 +- .../kernels/ir/vision/perspective_ir.h | 4 +- .../dataset/kernels/ir/vision/posterize_ir.cc | 198 +- .../dataset/kernels/ir/vision/posterize_ir.h | 118 +- .../kernels/ir/vision/rand_augment_ir.cc | 4 +- .../kernels/ir/vision/rand_augment_ir.h | 4 +- .../ir/vision/random_adjust_sharpness_ir.cc | 4 +- .../ir/vision/random_adjust_sharpness_ir.h | 4 +- .../kernels/ir/vision/random_affine_ir.cc | 4 +- .../kernels/ir/vision/random_affine_ir.h | 4 +- .../ir/vision/random_auto_contrast_ir.cc | 4 +- .../ir/vision/random_auto_contrast_ir.h | 4 +- .../ir/vision/random_color_adjust_ir.cc | 4 +- .../ir/vision/random_color_adjust_ir.h | 4 +- .../kernels/ir/vision/random_color_ir.cc | 4 +- .../kernels/ir/vision/random_color_ir.h | 4 +- .../ir/vision/random_crop_decode_resize_ir.cc | 4 +- 
.../ir/vision/random_crop_decode_resize_ir.h | 4 +- .../kernels/ir/vision/random_crop_ir.cc | 4 +- .../kernels/ir/vision/random_crop_ir.h | 4 +- .../ir/vision/random_crop_with_bbox_ir.cc | 4 +- .../ir/vision/random_crop_with_bbox_ir.h | 4 +- .../kernels/ir/vision/random_equalize_ir.cc | 4 +- .../kernels/ir/vision/random_equalize_ir.h | 4 +- .../ir/vision/random_horizontal_flip_ir.cc | 4 +- .../ir/vision/random_horizontal_flip_ir.h | 4 +- .../random_horizontal_flip_with_bbox_ir.cc | 4 +- .../random_horizontal_flip_with_bbox_ir.h | 4 +- .../kernels/ir/vision/random_invert_ir.cc | 4 +- .../kernels/ir/vision/random_invert_ir.h | 4 +- .../kernels/ir/vision/random_lighting_ir.cc | 4 +- .../kernels/ir/vision/random_lighting_ir.h | 4 +- .../kernels/ir/vision/random_posterize_ir.cc | 4 +- .../kernels/ir/vision/random_posterize_ir.h | 4 +- .../kernels/ir/vision/random_resize_ir.cc | 4 +- .../kernels/ir/vision/random_resize_ir.h | 4 +- .../ir/vision/random_resize_with_bbox_ir.cc | 4 +- .../ir/vision/random_resize_with_bbox_ir.h | 4 +- .../ir/vision/random_resized_crop_ir.cc | 4 +- .../ir/vision/random_resized_crop_ir.h | 4 +- .../random_resized_crop_with_bbox_ir.cc | 4 +- .../vision/random_resized_crop_with_bbox_ir.h | 4 +- .../kernels/ir/vision/random_rotation_ir.cc | 4 +- .../kernels/ir/vision/random_rotation_ir.h | 4 +- .../ir/vision/random_select_subpolicy_ir.cc | 4 +- .../ir/vision/random_select_subpolicy_ir.h | 4 +- .../kernels/ir/vision/random_sharpness_ir.cc | 4 +- .../kernels/ir/vision/random_sharpness_ir.h | 4 +- .../kernels/ir/vision/random_solarize_ir.cc | 4 +- .../kernels/ir/vision/random_solarize_ir.h | 4 +- .../ir/vision/random_vertical_flip_ir.cc | 4 +- .../ir/vision/random_vertical_flip_ir.h | 4 +- .../random_vertical_flip_with_bbox_ir.cc | 4 +- .../random_vertical_flip_with_bbox_ir.h | 4 +- .../dataset/kernels/ir/vision/rescale_ir.cc | 4 +- .../dataset/kernels/ir/vision/rescale_ir.h | 4 +- .../dataset/kernels/ir/vision/resize_ir.cc | 4 +- .../dataset/kernels/ir/vision/resize_ir.h | 4 +- .../ir/vision/resize_preserve_ar_ir.cc | 4 +- .../kernels/ir/vision/resize_preserve_ar_ir.h | 4 +- .../kernels/ir/vision/resize_with_bbox_ir.cc | 4 +- .../kernels/ir/vision/resize_with_bbox_ir.h | 4 +- .../kernels/ir/vision/resized_crop_ir.cc | 4 +- .../kernels/ir/vision/resized_crop_ir.h | 4 +- .../kernels/ir/vision/rgb_to_bgr_ir.cc | 4 +- .../dataset/kernels/ir/vision/rgb_to_bgr_ir.h | 4 +- .../kernels/ir/vision/rgb_to_gray_ir.cc | 4 +- .../kernels/ir/vision/rgb_to_gray_ir.h | 4 +- .../kernels/ir/vision/rgba_to_bgr_ir.cc | 4 +- .../kernels/ir/vision/rgba_to_bgr_ir.h | 4 +- .../kernels/ir/vision/rgba_to_rgb_ir.cc | 4 +- .../kernels/ir/vision/rgba_to_rgb_ir.h | 4 +- .../dataset/kernels/ir/vision/rotate_ir.cc | 4 +- .../dataset/kernels/ir/vision/rotate_ir.h | 4 +- .../kernels/ir/vision/slice_patches_ir.cc | 4 +- .../kernels/ir/vision/slice_patches_ir.h | 4 +- .../dataset/kernels/ir/vision/solarize_ir.cc | 4 +- .../dataset/kernels/ir/vision/solarize_ir.h | 4 +- .../kernels/ir/vision/swap_red_blue_ir.cc | 4 +- .../kernels/ir/vision/swap_red_blue_ir.h | 4 +- .../dataset/kernels/ir/vision/to_tensor_ir.cc | 4 +- .../dataset/kernels/ir/vision/to_tensor_ir.h | 4 +- .../ir/vision/trivial_augment_wide_ir.cc | 4 +- .../ir/vision/trivial_augment_wide_ir.h | 4 +- .../kernels/ir/vision/uniform_aug_ir.cc | 4 +- .../kernels/ir/vision/uniform_aug_ir.h | 4 +- .../kernels/ir/vision/vertical_flip_ir.cc | 4 +- .../kernels/ir/vision/vertical_flip_ir.h | 4 +- .../minddata/dataset/kernels/tensor_op.cc | 4 +- 
.../minddata/dataset/kernels/tensor_op.h | 4 +- .../dataset/liteapi/include/datasets.h | 4 +- .../minddata/dataset/util/allocator.h | 4 +- mindspore-lite/minddata/dataset/util/arena.cc | 4 +- mindspore-lite/minddata/dataset/util/arena.h | 4 +- .../minddata/dataset/util/auto_index.h | 4 +- mindspore-lite/minddata/dataset/util/bit.h | 4 +- mindspore-lite/minddata/dataset/util/btree.h | 4 +- mindspore-lite/minddata/dataset/util/buddy.cc | 4 +- mindspore-lite/minddata/dataset/util/buddy.h | 4 +- .../minddata/dataset/util/circular_pool.cc | 4 +- .../minddata/dataset/util/circular_pool.h | 4 +- .../minddata/dataset/util/cond_var.cc | 4 +- .../minddata/dataset/util/cond_var.h | 4 +- .../minddata/dataset/util/ftok_key.cc | 4 +- .../minddata/dataset/util/ftok_key.h | 4 +- .../minddata/dataset/util/gil_scoped.h | 4 +- .../minddata/dataset/util/intrp_resource.h | 4 +- .../minddata/dataset/util/intrp_service.cc | 4 +- .../minddata/dataset/util/intrp_service.h | 4 +- .../minddata/dataset/util/json_helper.cc | 4 +- .../minddata/dataset/util/json_helper.h | 4 +- mindspore-lite/minddata/dataset/util/list.h | 4 +- mindspore-lite/minddata/dataset/util/lock.cc | 4 +- mindspore-lite/minddata/dataset/util/lock.h | 4 +- .../minddata/dataset/util/md_log_adapter.cc | 4 +- .../minddata/dataset/util/md_log_adapter.h | 4 +- .../minddata/dataset/util/memory_pool.cc | 4 +- .../minddata/dataset/util/memory_pool.h | 4 +- .../minddata/dataset/util/monitor.cc | 4 +- .../minddata/dataset/util/monitor.h | 4 +- mindspore-lite/minddata/dataset/util/path.cc | 4 +- mindspore-lite/minddata/dataset/util/path.h | 4 +- mindspore-lite/minddata/dataset/util/queue.h | 4 +- .../minddata/dataset/util/queue_map.h | 4 +- mindspore-lite/minddata/dataset/util/random.h | 4 +- mindspore-lite/minddata/dataset/util/rdr.cc | 4 +- mindspore-lite/minddata/dataset/util/rdr.h | 4 +- .../minddata/dataset/util/semaphore.cc | 4 +- .../minddata/dataset/util/semaphore.h | 4 +- .../minddata/dataset/util/service.cc | 4 +- .../minddata/dataset/util/service.h | 4 +- .../minddata/dataset/util/services.cc | 4 +- .../minddata/dataset/util/services.h | 4 +- .../minddata/dataset/util/shared_mem.cc | 4 +- .../minddata/dataset/util/shared_mem.h | 4 +- .../minddata/dataset/util/sig_handler.cc | 4 +- .../minddata/dataset/util/sig_handler.h | 4 +- mindspore-lite/minddata/dataset/util/slice.cc | 4 +- mindspore-lite/minddata/dataset/util/slice.h | 4 +- .../minddata/dataset/util/status.cc | 4 +- mindspore-lite/minddata/dataset/util/status.h | 4 +- .../minddata/dataset/util/system_pool.h | 4 +- mindspore-lite/minddata/dataset/util/task.cc | 4 +- mindspore-lite/minddata/dataset/util/task.h | 4 +- .../minddata/dataset/util/task_manager.cc | 4 +- .../minddata/dataset/util/task_manager.h | 4 +- mindspore-lite/minddata/dataset/util/treap.h | 4 +- .../minddata/dataset/util/validators.cc | 4 +- .../minddata/dataset/util/validators.h | 4 +- .../minddata/dataset/util/wait_post.cc | 4 +- .../minddata/dataset/util/wait_post.h | 4 +- mindspore-lite/minddata/wrapper/MDToDApi.cc | 4 +- mindspore-lite/minddata/wrapper/MDToDApi.h | 4 +- .../minddata/wrapper/album_op_android.cc | 4 +- .../minddata/wrapper/album_op_android.h | 4 +- .../providers/dpico/common/log_util.cc | 4 +- .../providers/dpico/common/log_util.h | 4 +- .../providers/dpico/common/op_attr.h | 4 +- .../providers/dpico/infer/custom_infer.cc | 8 +- .../providers/dpico/infer/custom_infer.h | 4 +- .../providers/nnie/src/custom_allocator.h | 4 +- .../providers/nnie/src/custom_fp32.cc | 8 +- .../providers/nnie/src/custom_fp32.h | 
4 +- .../providers/nnie/src/custom_infer.cc | 8 +- .../providers/nnie/src/custom_infer.h | 4 +- .../providers/nnie/src/nnie_cfg_parser.cc | 4 +- .../providers/nnie/src/nnie_cfg_parser.h | 4 +- .../providers/nnie/src/nnie_common.cc | 4 +- .../providers/nnie/src/nnie_common.h | 4 +- .../providers/nnie/src/nnie_manager.cc | 4 +- .../providers/nnie/src/nnie_manager.h | 4 +- .../providers/nnie/src/nnie_memory.cc | 4 +- .../providers/nnie/src/nnie_memory.h | 4 +- .../providers/nnie/src/nnie_print.cc | 4 +- .../providers/nnie/src/nnie_print.h | 4 +- .../providers/nnie_proposal/src/proposal.cc | 4 +- .../providers/nnie_proposal/src/proposal.h | 4 +- .../nnie_proposal/src/proposal_fp32.cc | 8 +- .../nnie_proposal/src/proposal_fp32.h | 4 +- .../nnie_proposal/src/proposal_infer.cc | 8 +- .../nnie_proposal/src/proposal_infer.h | 4 +- mindspore-lite/python/src/tensor_numpy_impl.h | 4 +- mindspore-lite/src/common/config_infos.cc | 4 +- mindspore-lite/src/common/config_infos.h | 4 +- mindspore-lite/src/common/log.cc | 4 +- mindspore-lite/src/common/log.h | 4 +- mindspore-lite/src/common/log_adapter.h | 4 +- .../src/common/mutable_tensor_impl.h | 4 +- .../kernel/entrance_subgraph_kernel.cc | 4 +- .../kernel/entrance_subgraph_kernel.h | 4 +- .../kernel/exit_subgraph_kernel.cc | 4 +- .../kernel/exit_subgraph_kernel.h | 4 +- .../control_flow/kernel/identity_kernel.cc | 4 +- .../src/control_flow/kernel/identity_kernel.h | 4 +- mindspore-lite/src/executor/kernel_exec.cc | 4 +- .../src/executor/sub_graph_kernel.cc | 4 +- .../src/executor/sub_graph_kernel.h | 4 +- mindspore-lite/src/extendrt/cxx_api/cell.cc | 4 +- .../src/extendrt/cxx_api/context.cc | 4 +- mindspore-lite/src/extendrt/cxx_api/context.h | 4 +- mindspore-lite/src/extendrt/cxx_api/dlutils.h | 4 +- .../src/extendrt/cxx_api/file_utils.h | 4 +- .../src/extendrt/cxx_api/graph/graph.cc | 4 +- .../src/extendrt/cxx_api/graph/graph_data.cc | 4 +- .../src/extendrt/cxx_api/graph/graph_data.h | 4 +- .../src/extendrt/cxx_api/graph/graph_impl.h | 4 +- .../src/extendrt/cxx_api/model/model.cc | 4 +- .../src/extendrt/cxx_api/model/model_group.cc | 4 +- .../cxx_api/model/model_group_impl.cc | 4 +- .../extendrt/cxx_api/model/model_group_impl.h | 4 +- .../src/extendrt/cxx_api/model/model_impl.cc | 4 +- .../src/extendrt/cxx_api/model/model_impl.h | 4 +- .../cxx_api/model/multi_model_runner.cc | 4 +- .../model_pool/model_parallel_runner.cc | 4 +- .../model_pool/model_parallel_runner_impl.cc | 4 +- .../model_pool/model_parallel_runner_impl.h | 4 +- .../extendrt/cxx_api/model_pool/model_pool.cc | 4 +- .../extendrt/cxx_api/model_pool/model_pool.h | 4 +- .../cxx_api/model_pool/model_worker.cc | 4 +- .../cxx_api/model_pool/model_worker.h | 4 +- .../cxx_api/model_pool/predict_task_queue.cc | 4 +- .../cxx_api/model_pool/predict_task_queue.h | 4 +- .../cxx_api/model_pool/resource_manager.cc | 4 +- .../cxx_api/model_pool/resource_manager.h | 4 +- .../cxx_api/model_pool/runner_config.h | 4 +- .../src/extendrt/cxx_api/serialization.cc | 4 +- .../delegate/ascend_acl/acl_allocator.cc | 4 +- .../delegate/ascend_acl/acl_allocator.h | 4 +- .../delegate/ascend_acl/acl_env_guard.cc | 4 +- .../delegate/ascend_acl/acl_env_guard.h | 4 +- .../delegate/ascend_acl/acl_graph_executor.cc | 4 +- .../delegate/ascend_acl/acl_graph_executor.h | 4 +- .../delegate/ascend_acl/acl_mem_manager.cc | 4 +- .../delegate/ascend_acl/acl_mem_manager.h | 4 +- .../delegate/ascend_acl/acl_model_options.h | 4 +- .../delegate/ascend_acl/acl_plugin_impl.cc | 4 +- .../delegate/ascend_acl/acl_plugin_impl.h | 4 +- 
.../ascend_acl/ascend_allocator_plugin.cc | 4 +- .../ascend_acl/ascend_allocator_plugin.h | 4 +- .../delegate/ascend_acl/dyn_shape_process.cc | 4 +- .../delegate/ascend_acl/dyn_shape_process.h | 4 +- .../delegate/ascend_acl/model_infer.cc | 4 +- .../delegate/ascend_acl/model_infer.h | 4 +- .../delegate/ascend_acl/model_process.cc | 4 +- .../delegate/ascend_acl/model_process.h | 4 +- .../extendrt/delegate/ascend_acl/profiling.cc | 4 +- .../extendrt/delegate/ascend_acl/profiling.h | 4 +- .../ascend_ge/aoe_api_tune_process.cc | 4 +- .../delegate/ascend_ge/aoe_api_tune_process.h | 4 +- .../delegate/ascend_ge/ge_context_manager.cc | 4 +- .../delegate/ascend_ge/ge_context_manager.h | 4 +- .../delegate/ascend_ge/ge_device_context.cc | 4 +- .../delegate/ascend_ge/ge_device_context.h | 4 +- .../delegate/ascend_ge/ge_dynamic_utils.cc | 4 +- .../delegate/ascend_ge/ge_dynamic_utils.h | 4 +- .../delegate/ascend_ge/ge_graph_executor.cc | 4 +- .../delegate/ascend_ge/ge_graph_executor.h | 4 +- .../delegate/ascend_ge/ge_memory_manager.cc | 4 +- .../delegate/ascend_ge/ge_memory_manager.h | 4 +- .../delegate/ascend_ge/ge_plugin_impl.cc | 4 +- .../delegate/ascend_ge/ge_plugin_impl.h | 4 +- .../extendrt/delegate/ascend_ge/ge_utils.cc | 4 +- .../extendrt/delegate/ascend_ge/ge_utils.h | 4 +- .../src/extendrt/delegate/factory.cc | 4 +- .../src/extendrt/delegate/factory.h | 4 +- .../litert/func_graph_reuse_manager.cc | 4 +- .../litert/func_graph_reuse_manager.h | 4 +- .../graph_executor/litert/graph_executor.cc | 4 +- .../graph_executor/litert/graph_executor.h | 4 +- .../litert/litert_plugin_impl.h | 4 +- .../delegate/plugin/litert_executor_plugin.cc | 4 +- .../delegate/plugin/litert_executor_plugin.h | 4 +- .../src/extendrt/delegate_graph_executor.cc | 4 +- .../src/extendrt/delegate_graph_executor.h | 4 +- .../src/extendrt/dynamic_mem_allocator.cc | 4 +- .../src/extendrt/dynamic_mem_allocator.h | 4 +- .../src/extendrt/dynamic_mem_manager.cc | 4 +- .../src/extendrt/dynamic_mem_manager.h | 4 +- mindspore-lite/src/extendrt/execution_flow.cc | 4 +- mindspore-lite/src/extendrt/execution_flow.h | 4 +- mindspore-lite/src/extendrt/execution_plan.cc | 4 +- mindspore-lite/src/extendrt/execution_plan.h | 4 +- mindspore-lite/src/extendrt/factory.h | 4 +- .../src/extendrt/graph_scheduler.cc | 4 +- mindspore-lite/src/extendrt/graph_scheduler.h | 4 +- mindspore-lite/src/extendrt/infer_session.cc | 4 +- mindspore-lite/src/extendrt/infer_session.h | 4 +- .../mindir_loader/abstract_base_model.h | 4 +- .../extendrt/mindir_loader/abstract_kernel.h | 4 +- .../mindir_model/inner_kernel.cc | 4 +- .../mindir_loader/mindir_model/inner_kernel.h | 4 +- .../mindir_model/kernel_mod_mock.cc | 4 +- .../mindir_model/kernel_mod_util.cc | 4 +- .../mindir_model/kernel_mod_util.h | 4 +- .../extendrt/mindir_loader/model_loader.cc | 4 +- .../src/extendrt/mindir_loader/model_loader.h | 4 +- .../extendrt/mock/lite_runtime/converters.cc | 4 +- .../extendrt/mock/lite_runtime/converters.h | 4 +- .../populate/arithmetic_populate.cc | 4 +- .../populate/arithmetic_populate.h | 4 +- .../base_operator_populate_register.cc | 4 +- .../base_operator_populate_register.h | 4 +- mindspore-lite/src/extendrt/model_manager.cc | 4 +- mindspore-lite/src/extendrt/model_manager.h | 4 +- mindspore-lite/src/extendrt/numa_adapter.cc | 4 +- mindspore-lite/src/extendrt/numa_adapter.h | 4 +- .../src/extendrt/session/delegate_session.cc | 4 +- .../src/extendrt/session/delegate_session.h | 4 +- .../src/extendrt/session/factory.cc | 4 +- 
mindspore-lite/src/extendrt/session/factory.h | 4 +- .../extendrt/session/lite_graph_executor.h | 4 +- mindspore-lite/src/extendrt/session/type.h | 4 +- mindspore-lite/src/extendrt/signal_handler.cc | 4 +- mindspore-lite/src/extendrt/signal_handler.h | 4 +- .../src/extendrt/subgraph_kernel.cc | 4 +- mindspore-lite/src/extendrt/subgraph_kernel.h | 4 +- .../src/extendrt/utils/func_graph_utils.cc | 4 +- .../src/extendrt/utils/func_graph_utils.h | 4 +- .../src/extendrt/utils/segment_utils.h | 4 +- .../src/extendrt/utils/serialization.cc | 4 +- .../src/extendrt/utils/serialization.h | 4 +- .../src/extendrt/utils/tensor_default_impl.h | 5 +- .../src/extendrt/utils/tensor_utils.cc | 4 +- .../src/extendrt/utils/tensor_utils.h | 4 +- mindspore-lite/src/infer/context.h | 4 +- mindspore-lite/src/infer/kernel.h | 4 +- mindspore-lite/src/infer/primitive_type.cc | 4 +- mindspore-lite/src/infer/tensor.h | 4 +- mindspore-lite/src/litert/allocator.cc | 4 +- mindspore-lite/src/litert/c_api/model_c.cc | 4 +- .../cxx_api/callback/callback_adapter.h | 4 +- .../litert/cxx_api/callback/callback_impl.h | 4 +- .../src/litert/cxx_api/callback/ckpt_saver.cc | 4 +- .../litert/cxx_api/callback/loss_monitor.cc | 4 +- .../litert/cxx_api/callback/lr_scheduler.cc | 4 +- .../litert/cxx_api/callback/train_accuracy.cc | 4 +- mindspore-lite/src/litert/cxx_api/cell.cc | 4 +- mindspore-lite/src/litert/cxx_api/context.cc | 4 +- mindspore-lite/src/litert/cxx_api/context.h | 4 +- .../src/litert/cxx_api/converters.cc | 4 +- .../src/litert/cxx_api/converters.h | 4 +- .../src/litert/cxx_api/graph/graph.cc | 4 +- .../src/litert/cxx_api/graph/graph_data.h | 4 +- mindspore-lite/src/litert/cxx_api/kernel.cc | 142 +- .../kernel_executor/custom_om_infer.cc | 4 +- .../kernel_executor/custom_om_kernel.cc | 4 +- .../kernel_executor/custom_om_kernel.h | 4 +- .../kernel_executor/kernel_executor.cc | 4 +- .../cxx_api/kernel_executor/kernel_executor.h | 4 +- .../kernel_executor/kernel_executor_impl.cc | 4 +- .../kernel_executor/kernel_executor_impl.h | 4 +- .../src/litert/cxx_api/metrics/accuracy.cc | 4 +- .../litert/cxx_api/metrics/metrics_adapter.h | 4 +- .../src/litert/cxx_api/metrics/metrics_impl.h | 4 +- .../src/litert/cxx_api/model/model.cc | 4 +- .../src/litert/cxx_api/model/model_group.cc | 4 +- .../litert/cxx_api/model/model_group_impl.cc | 4 +- .../litert/cxx_api/model/model_group_impl.h | 4 +- .../src/litert/cxx_api/model/model_impl.cc | 4 +- .../src/litert/cxx_api/model/model_impl.h | 4 +- .../src/litert/cxx_api/serialization.cc | 4 +- .../src/litert/cxx_api/tensor/tensor_impl.cc | 4 +- .../src/litert/cxx_api/tensor/tensor_impl.h | 4 +- .../src/litert/cxx_api/tensor_utils.cc | 4 +- .../src/litert/cxx_api/tensor_utils.h | 4 +- .../src/litert/cxx_api/train/converters.cc | 4 +- .../src/litert/cxx_api/train/model.cc | 4 +- .../src/litert/cxx_api/train/model_build.cc | 4 +- .../litert/cxx_api/train/model_build_impl.cc | 4 +- .../src/litert/cxx_api/train/model_impl.cc | 4 +- .../src/litert/cxx_api/train/train_support.cc | 4 +- mindspore-lite/src/litert/cxx_api/types.cc | 4 +- .../coreml/stub/coreml_delegate_stub.cc | 4 +- .../parameter_cache/cache_algorithm.h | 4 +- .../delegate/parameter_cache/cache_mem_base.h | 4 +- .../parameter_cache/embedding_cache.cc | 4 +- .../parameter_cache/embedding_cache.h | 4 +- .../embedding_cache_manager.cc | 4 +- .../parameter_cache/embedding_cache_manager.h | 4 +- .../parameter_cache/gpu/gpu_cache_mem.cc | 4 +- .../parameter_cache/gpu/gpu_cache_mem.h | 4 +- .../delegate/parameter_cache/lfu_cache.cc | 4 +- 
.../delegate/parameter_cache/lfu_cache.h | 4 +- .../parameter_cache/load_host_cache_model.cc | 4 +- .../parameter_cache/load_host_cache_model.h | 4 +- mindspore-lite/src/litert/inner_allocator.cc | 4 +- mindspore-lite/src/litert/inner_allocator.h | 4 +- .../litert/kernel/cpu/base/arithmetic_base.cc | 4 +- .../litert/kernel/cpu/base/arithmetic_base.h | 4 +- .../src/litert/kernel/cpu/base/assert.cc | 4 +- .../src/litert/kernel/cpu/base/assert.h | 4 +- .../src/litert/kernel/cpu/base/call.cc | 4 +- .../src/litert/kernel/cpu/base/call.h | 4 +- .../kernel/cpu/base/constant_of_shape.cc | 4 +- .../kernel/cpu/base/constant_of_shape.h | 4 +- .../kernel/cpu/base/convolution_base.cc | 4 +- .../litert/kernel/cpu/base/convolution_base.h | 4 +- .../litert/kernel/cpu/base/custom_is_inf.cc | 4 +- .../litert/kernel/cpu/base/custom_is_inf.h | 4 +- .../kernel/cpu/base/custom_masked_fill.cc | 4 +- .../kernel/cpu/base/custom_masked_fill.h | 4 +- .../kernel/cpu/base/custom_tensor_scatter.cc | 4 +- .../kernel/cpu/base/custom_tensor_scatter.h | 4 +- .../cpu/base/detection_post_process_base.cc | 4 +- .../cpu/base/detection_post_process_base.h | 4 +- .../kernel/cpu/base/format_transpose.cc | 4 +- .../litert/kernel/cpu/base/format_transpose.h | 4 +- .../kernel/cpu/base/group_convolution_base.cc | 4 +- .../kernel/cpu/base/group_convolution_base.h | 4 +- .../cpu/base/group_convolution_creator.cc | 4 +- .../cpu/base/group_convolution_creator.h | 4 +- .../kernel/cpu/base/layout_transform.cc | 4 +- .../litert/kernel/cpu/base/layout_transform.h | 4 +- .../litert/kernel/cpu/base/partial_fusion.cc | 4 +- .../litert/kernel/cpu/base/partial_fusion.h | 4 +- .../kernel/cpu/base/quant_dtype_cast.cc | 4 +- .../litert/kernel/cpu/base/quant_dtype_cast.h | 4 +- .../litert/kernel/cpu/base/random_normal.cc | 4 +- .../litert/kernel/cpu/base/random_normal.h | 4 +- .../src/litert/kernel/cpu/base/reduce_base.cc | 4 +- .../src/litert/kernel/cpu/base/reduce_base.h | 4 +- .../src/litert/kernel/cpu/base/resize_base.cc | 4 +- .../src/litert/kernel/cpu/base/resize_base.h | 4 +- .../litert/kernel/cpu/base/scatter_nd_base.cc | 4 +- .../litert/kernel/cpu/base/scatter_nd_base.h | 4 +- .../kernel/cpu/base/scatter_nd_binary.cc | 4 +- .../kernel/cpu/base/scatter_nd_binary.h | 4 +- .../src/litert/kernel/cpu/base/select.cc | 4 +- .../src/litert/kernel/cpu/base/select.h | 4 +- .../src/litert/kernel/cpu/base/split_base.cc | 4 +- .../src/litert/kernel/cpu/base/split_base.h | 4 +- .../cpu/base/split_with_over_lap_base.cc | 4 +- .../cpu/base/split_with_over_lap_base.h | 4 +- .../kernel/cpu/base/tensor_scatter_add.cc | 4 +- .../kernel/cpu/base/tensor_scatter_add.h | 4 +- .../litert/kernel/cpu/base/transpose_base.cc | 4 +- .../litert/kernel/cpu/base/transpose_base.h | 4 +- .../src/litert/kernel/cpu/control/switch.cc | 4 +- .../src/litert/kernel/cpu/control/switch.h | 4 +- .../litert/kernel/cpu/control/switch_layer.cc | 4 +- .../litert/kernel/cpu/control/switch_layer.h | 4 +- .../litert/kernel/cpu/control/tensor_array.cc | 4 +- .../litert/kernel/cpu/control/tensor_array.h | 4 +- .../cpu/control/tensorlist_fromtensor.cc | 4 +- .../cpu/control/tensorlist_fromtensor.h | 4 +- .../kernel/cpu/control/tensorlist_getitem.cc | 4 +- .../kernel/cpu/control/tensorlist_getitem.h | 4 +- .../kernel/cpu/control/tensorlist_reserve.cc | 4 +- .../kernel/cpu/control/tensorlist_reserve.h | 4 +- .../kernel/cpu/control/tensorlist_setitem.cc | 4 +- .../kernel/cpu/control/tensorlist_setitem.h | 4 +- .../kernel/cpu/control/tensorlist_stack.cc | 4 +- 
.../kernel/cpu/control/tensorlist_stack.h | 4 +- .../litert/kernel/cpu/fp16/biasadd_fp16.cc | 4 +- .../src/litert/kernel/cpu/fp16/biasadd_fp16.h | 4 +- .../src/litert/kernel/cpu/fp16/cast_fp16.cc | 4 +- .../src/litert/kernel/cpu/fp16/cast_fp16.h | 4 +- .../src/litert/kernel/cpu/fp16/common_fp16.cc | 4 +- .../src/litert/kernel/cpu/fp16/common_fp16.h | 4 +- .../kernel/cpu/fp16/convolution_1x1_fp16.cc | 4 +- .../kernel/cpu/fp16/convolution_1x1_fp16.h | 4 +- .../cpu/fp16/convolution_delegate_fp16.cc | 4 +- .../cpu/fp16/convolution_delegate_fp16.h | 4 +- .../fp16/convolution_depthwise_3x3_fp16.cc | 4 +- .../cpu/fp16/convolution_depthwise_3x3_fp16.h | 4 +- .../cpu/fp16/convolution_depthwise_fp16.cc | 4 +- .../cpu/fp16/convolution_depthwise_fp16.h | 4 +- .../convolution_depthwise_slidewindow_fp16.cc | 4 +- .../convolution_depthwise_slidewindow_fp16.h | 4 +- .../kernel/cpu/fp16/convolution_fp16.cc | 4 +- .../litert/kernel/cpu/fp16/convolution_fp16.h | 4 +- .../cpu/fp16/convolution_winograd_fp16.cc | 4 +- .../cpu/fp16/convolution_winograd_fp16.h | 4 +- .../litert/kernel/cpu/fp16/custom_gru_fp16.cc | 4 +- .../litert/kernel/cpu/fp16/custom_gru_fp16.h | 4 +- .../cpu/fp16/deconvolution_depthwise_fp16.cc | 4 +- .../cpu/fp16/deconvolution_depthwise_fp16.h | 4 +- .../kernel/cpu/fp16/deconvolution_fp16.cc | 4 +- .../kernel/cpu/fp16/deconvolution_fp16.h | 4 +- .../cpu/fp16/deconvolution_winograd_fp16.cc | 4 +- .../cpu/fp16/deconvolution_winograd_fp16.h | 4 +- .../kernel/cpu/fp16/dynamic_quant_fp16.cc | 4 +- .../kernel/cpu/fp16/dynamic_quant_fp16.h | 4 +- .../kernel/cpu/fp16/fullconnection_fp16.cc | 4 +- .../kernel/cpu/fp16/fullconnection_fp16.h | 4 +- .../kernel/cpu/fp16/group_convolution_fp16.cc | 4 +- .../kernel/cpu/fp16/group_convolution_fp16.h | 4 +- .../src/litert/kernel/cpu/fp16/gru_fp16.cc | 4 +- .../src/litert/kernel/cpu/fp16/gru_fp16.h | 4 +- .../kernel/cpu/fp16/instance_norm_fp16.cc | 4 +- .../kernel/cpu/fp16/instance_norm_fp16.h | 4 +- .../kernel/cpu/fp16/layout_transform_fp16.cc | 4 +- .../kernel/cpu/fp16/layout_transform_fp16.h | 4 +- .../src/litert/kernel/cpu/fp16/lstm_fp16.cc | 4 +- .../litert/kernel/cpu/fp16/lstm_fp16_base.cc | 4 +- .../litert/kernel/cpu/fp16/lstm_fp16_base.h | 4 +- .../kernel/cpu/fp16/lstm_mindir_fp16.cc | 4 +- .../litert/kernel/cpu/fp16/lstm_mindir_fp16.h | 4 +- .../kernel/cpu/fp16/lstm_non_mindir_fp16.cc | 4 +- .../kernel/cpu/fp16/lstm_non_mindir_fp16.h | 4 +- .../kernel/cpu/fp16/matmul_base_fp16.cc | 4 +- .../litert/kernel/cpu/fp16/matmul_base_fp16.h | 4 +- .../src/litert/kernel/cpu/fp16/matmul_fp16.cc | 4 +- .../src/litert/kernel/cpu/fp16/matmul_fp16.h | 4 +- .../kernel/cpu/fp16/quant_dtype_cast_fp16.cc | 4 +- .../kernel/cpu/fp16/quant_dtype_cast_fp16.h | 4 +- .../src/litert/kernel/cpu/fp16/resize_fp16.cc | 4 +- .../src/litert/kernel/cpu/fp16/resize_fp16.h | 4 +- .../cpu/fp16_grad/activation_fp16_grad.cc | 4 +- .../cpu/fp16_grad/activation_fp16_grad.h | 4 +- .../cpu/fp16_grad/arithmetic_fp16_grad.cc | 4 +- .../cpu/fp16_grad/arithmetic_fp16_grad.h | 4 +- .../fp16_grad/arithmetic_fp16_self_grad.cc | 4 +- .../cpu/fp16_grad/arithmetic_fp16_self_grad.h | 4 +- .../kernel/cpu/fp16_grad/bias_fp16_grad.cc | 4 +- .../kernel/cpu/fp16_grad/bias_fp16_grad.h | 4 +- .../kernel/cpu/fp16_grad/bn_fp16_grad.cc | 4 +- .../kernel/cpu/fp16_grad/bn_fp16_grad.h | 4 +- .../fp16_grad/convolution_fp16_grad_filter.cc | 4 +- .../fp16_grad/convolution_fp16_grad_filter.h | 4 +- .../fp16_grad/convolution_fp16_grad_input.cc | 4 +- .../fp16_grad/convolution_fp16_grad_input.h | 4 +- 
.../kernel/cpu/fp16_grad/dropout_fp16_grad.cc | 4 +- .../kernel/cpu/fp16_grad/dropout_fp16_grad.h | 4 +- .../cpu/fp16_grad/layernorm_fp16_grad.cc | 4 +- .../cpu/fp16_grad/layernorm_fp16_grad.h | 4 +- .../kernel/cpu/fp16_grad/neg_fp16_grad.cc | 4 +- .../kernel/cpu/fp16_grad/neg_fp16_grad.h | 4 +- .../kernel/cpu/fp16_grad/pooling_fp16_grad.cc | 4 +- .../kernel/cpu/fp16_grad/pooling_fp16_grad.h | 4 +- .../kernel/cpu/fp16_grad/resize_fp16_grad.cc | 4 +- .../kernel/cpu/fp16_grad/resize_fp16_grad.h | 4 +- .../cpu/fp16_grad/strided_slice_fp16_grad.cc | 4 +- .../cpu/fp16_grad/strided_slice_fp16_grad.h | 4 +- .../fp16_grad/unsorted_segment_sum_fp16.cc | 4 +- .../cpu/fp16_grad/unsorted_segment_sum_fp16.h | 4 +- .../src/litert/kernel/cpu/fp32/adder_fp32.cc | 4 +- .../src/litert/kernel/cpu/fp32/adder_fp32.h | 4 +- .../src/litert/kernel/cpu/fp32/affine_fp32.cc | 4 +- .../src/litert/kernel/cpu/fp32/affine_fp32.h | 4 +- .../litert/kernel/cpu/fp32/all_gather_fp32.cc | 4 +- .../litert/kernel/cpu/fp32/all_gather_fp32.h | 4 +- .../litert/kernel/cpu/fp32/arithmetic_fp32.cc | 4 +- .../litert/kernel/cpu/fp32/arithmetic_fp32.h | 4 +- .../kernel/cpu/fp32/broadcast_to_fp32.cc | 4 +- .../kernel/cpu/fp32/broadcast_to_fp32.h | 4 +- .../kernel/cpu/fp32/cast_for_x86_fp16.cc | 4 +- .../src/litert/kernel/cpu/fp32/cast_fp32.cc | 4 +- .../src/litert/kernel/cpu/fp32/cast_fp32.h | 4 +- .../kernel/cpu/fp32/convolution_1x1_fp32.cc | 4 +- .../kernel/cpu/fp32/convolution_1x1_fp32.h | 4 +- .../cpu/fp32/convolution_delegate_fp32.cc | 4 +- .../cpu/fp32/convolution_delegate_fp32.h | 4 +- .../fp32/convolution_depthwise_3x3_fp32.cc | 4 +- .../cpu/fp32/convolution_depthwise_3x3_fp32.h | 4 +- .../cpu/fp32/convolution_depthwise_fp32.cc | 4 +- .../cpu/fp32/convolution_depthwise_fp32.h | 4 +- .../convolution_depthwise_indirect_fp32.cc | 4 +- .../convolution_depthwise_indirect_fp32.h | 4 +- .../convolution_depthwise_slidewindow_fp32.cc | 4 +- .../convolution_depthwise_slidewindow_fp32.h | 4 +- ...volution_depthwise_slidewindow_x86_fp32.cc | 4 +- ...nvolution_depthwise_slidewindow_x86_fp32.h | 4 +- .../kernel/cpu/fp32/convolution_fp32.cc | 4 +- .../litert/kernel/cpu/fp32/convolution_fp32.h | 4 +- .../cpu/fp32/convolution_im2col_arm32_fp32.cc | 4 +- .../cpu/fp32/convolution_im2col_arm32_fp32.h | 4 +- .../cpu/fp32/convolution_im2col_arm64_fp32.cc | 4 +- .../cpu/fp32/convolution_im2col_arm64_fp32.h | 4 +- .../fp32/convolution_im2col_avx512_fp32.cc | 4 +- .../cpu/fp32/convolution_im2col_avx512_fp32.h | 4 +- .../cpu/fp32/convolution_im2col_avx_fp32.cc | 4 +- .../cpu/fp32/convolution_im2col_avx_fp32.h | 4 +- .../cpu/fp32/convolution_im2col_base_fp32.cc | 4 +- .../cpu/fp32/convolution_im2col_base_fp32.h | 4 +- .../cpu/fp32/convolution_im2col_fp32.cc | 4 +- .../kernel/cpu/fp32/convolution_im2col_fp32.h | 4 +- .../cpu/fp32/convolution_im2col_sse_fp32.cc | 4 +- .../cpu/fp32/convolution_im2col_sse_fp32.h | 4 +- .../convolution_slidewindow_arm64_fp32.cc | 4 +- .../fp32/convolution_slidewindow_arm64_fp32.h | 4 +- .../fp32/convolution_slidewindow_avx_fp32.cc | 4 +- .../fp32/convolution_slidewindow_avx_fp32.h | 4 +- .../cpu/fp32/convolution_slidewindow_fp32.cc | 4 +- .../cpu/fp32/convolution_slidewindow_fp32.h | 4 +- .../cpu/fp32/convolution_sw_1x1_fp32.cc | 4 +- .../kernel/cpu/fp32/convolution_sw_1x1_fp32.h | 4 +- .../fp32/convolution_winograd_arm32_fp32.cc | 4 +- .../fp32/convolution_winograd_arm32_fp32.h | 4 +- .../fp32/convolution_winograd_arm64_fp32.cc | 4 +- .../fp32/convolution_winograd_arm64_fp32.h | 4 +- 
.../cpu/fp32/convolution_winograd_avx_fp32.cc | 4 +- .../cpu/fp32/convolution_winograd_avx_fp32.h | 4 +- .../fp32/convolution_winograd_base_fp32.cc | 4 +- .../cpu/fp32/convolution_winograd_base_fp32.h | 4 +- .../cpu/fp32/convolution_winograd_fp32.cc | 4 +- .../cpu/fp32/convolution_winograd_fp32.h | 4 +- .../cpu/fp32/convolution_winograd_sse_fp32.cc | 4 +- .../cpu/fp32/convolution_winograd_sse_fp32.h | 4 +- .../src/litert/kernel/cpu/fp32/cumsum_fp32.cc | 4 +- .../src/litert/kernel/cpu/fp32/cumsum_fp32.h | 4 +- .../litert/kernel/cpu/fp32/custom_gru_fp32.cc | 4 +- .../litert/kernel/cpu/fp32/custom_gru_fp32.h | 4 +- .../cpu/fp32/deconvolution_depthwise_fp32.cc | 4 +- .../cpu/fp32/deconvolution_depthwise_fp32.h | 4 +- .../kernel/cpu/fp32/deconvolution_fp32.cc | 4 +- .../kernel/cpu/fp32/deconvolution_fp32.h | 4 +- .../cpu/fp32/deconvolution_winograd_fp32.cc | 4 +- .../cpu/fp32/deconvolution_winograd_fp32.h | 4 +- .../cpu/fp32/detection_post_process_fp32.cc | 4 +- .../cpu/fp32/detection_post_process_fp32.h | 4 +- .../kernel/cpu/fp32/embedding_lookup_fp32.cc | 4 +- .../kernel/cpu/fp32/embedding_lookup_fp32.h | 4 +- .../src/litert/kernel/cpu/fp32/glu_fp32.cc | 4 +- .../src/litert/kernel/cpu/fp32/glu_fp32.h | 4 +- .../kernel/cpu/fp32/group_convolution_fp32.cc | 4 +- .../kernel/cpu/fp32/group_convolution_fp32.h | 4 +- .../src/litert/kernel/cpu/fp32/gru_fp32.cc | 4 +- .../src/litert/kernel/cpu/fp32/gru_fp32.h | 4 +- .../kernel/cpu/fp32/instance_norm_fp32.cc | 4 +- .../kernel/cpu/fp32/instance_norm_fp32.h | 4 +- .../cpu/fp32/invert_permutation_fp32.cc | 4 +- .../kernel/cpu/fp32/invert_permutation_fp32.h | 4 +- .../litert/kernel/cpu/fp32/l2_norm_fp32.cc | 4 +- .../src/litert/kernel/cpu/fp32/l2_norm_fp32.h | 4 +- .../src/litert/kernel/cpu/fp32/lstm_fp32.cc | 4 +- .../litert/kernel/cpu/fp32/lstm_fp32_base.cc | 4 +- .../litert/kernel/cpu/fp32/lstm_fp32_base.h | 4 +- .../kernel/cpu/fp32/lstm_mindir_fp32.cc | 4 +- .../litert/kernel/cpu/fp32/lstm_mindir_fp32.h | 4 +- .../kernel/cpu/fp32/lstm_non_mindir_fp32.cc | 4 +- .../kernel/cpu/fp32/lstm_non_mindir_fp32.h | 4 +- .../src/litert/kernel/cpu/fp32/matmul_fp32.cc | 4 +- .../src/litert/kernel/cpu/fp32/matmul_fp32.h | 4 +- .../kernel/cpu/fp32/matmul_fp32_arm32.cc | 4 +- .../kernel/cpu/fp32/matmul_fp32_arm32.h | 4 +- .../kernel/cpu/fp32/matmul_fp32_arm64.cc | 4 +- .../kernel/cpu/fp32/matmul_fp32_arm64.h | 4 +- .../litert/kernel/cpu/fp32/matmul_fp32_avx.cc | 4 +- .../litert/kernel/cpu/fp32/matmul_fp32_avx.h | 4 +- .../kernel/cpu/fp32/matmul_fp32_avx512.cc | 4 +- .../kernel/cpu/fp32/matmul_fp32_avx512.h | 4 +- .../kernel/cpu/fp32/matmul_fp32_base.cc | 4 +- .../litert/kernel/cpu/fp32/matmul_fp32_base.h | 4 +- .../litert/kernel/cpu/fp32/matmul_fp32_sse.cc | 4 +- .../litert/kernel/cpu/fp32/matmul_fp32_sse.h | 4 +- .../cpu/fp32/non_max_suppression_fp32.cc | 4 +- .../cpu/fp32/non_max_suppression_fp32.h | 4 +- .../online_fusion/cast_gather_reduce_fp32.cc | 4 +- .../online_fusion/cast_gather_reduce_fp32.h | 4 +- .../fp32/online_fusion/reduce_concat_fp32.cc | 4 +- .../fp32/online_fusion/reduce_concat_fp32.h | 4 +- .../online_fusion/split_reduce_concat_fp32.cc | 4 +- .../online_fusion/split_reduce_concat_fp32.h | 4 +- .../kernel/cpu/fp32/reduce_scatter_fp32.cc | 4 +- .../kernel/cpu/fp32/reduce_scatter_fp32.h | 4 +- .../fp32/relative_position_attention_fp32.cc | 4 +- .../fp32/relative_position_attention_fp32.h | 4 +- .../src/litert/kernel/cpu/fp32/resize_fp32.cc | 4 +- .../src/litert/kernel/cpu/fp32/resize_fp32.h | 4 +- .../kernel/cpu/fp32/reverse_sequence_fp32.cc | 4 +- 
.../kernel/cpu/fp32/reverse_sequence_fp32.h | 4 +- .../kernel/cpu/fp32/roi_pooling_fp32.cc | 4 +- .../litert/kernel/cpu/fp32/roi_pooling_fp32.h | 4 +- .../kernel/cpu/fp32/scatter_nd_update_fp32.cc | 4 +- .../kernel/cpu/fp32/scatter_nd_update_fp32.h | 4 +- .../kernel/cpu/fp32/shape_fusion_fp32.cc | 4 +- .../kernel/cpu/fp32/shape_fusion_fp32.h | 4 +- .../kernel/cpu/fp32/space_to_batch_fp32.cc | 4 +- .../kernel/cpu/fp32/space_to_batch_fp32.h | 4 +- .../kernel/cpu/fp32/space_to_depth_fp32.cc | 4 +- .../kernel/cpu/fp32/space_to_depth_fp32.h | 4 +- .../cpu/fp32/sparse_fill_empty_rows_fp32.cc | 4 +- .../cpu/fp32/sparse_fill_empty_rows_fp32.h | 4 +- .../kernel/cpu/fp32/sparse_reshape_fp32.cc | 4 +- .../kernel/cpu/fp32/sparse_reshape_fp32.h | 4 +- .../cpu/fp32/sparse_segment_sum_fp32.cc | 4 +- .../kernel/cpu/fp32/sparse_segment_sum_fp32.h | 4 +- .../kernel/cpu/fp32/sparse_to_dense_fp32.cc | 4 +- .../kernel/cpu/fp32/sparse_to_dense_fp32.h | 4 +- .../src/litert/kernel/cpu/fp32/topk_fp32.cc | 4 +- .../src/litert/kernel/cpu/fp32/topk_fp32.h | 4 +- .../kernel/cpu/fp32/transpose_server_fp32.cc | 4 +- .../kernel/cpu/fp32/transpose_server_fp32.h | 4 +- .../kernel/cpu/fp32/uniform_real_fp32.cc | 4 +- .../kernel/cpu/fp32/uniform_real_fp32.h | 4 +- .../litert/kernel/cpu/fp32/unstack_fp32.cc | 4 +- .../src/litert/kernel/cpu/fp32/unstack_fp32.h | 4 +- .../kernel/cpu/fp32_grad/activation_grad.cc | 4 +- .../kernel/cpu/fp32_grad/activation_grad.h | 4 +- .../src/litert/kernel/cpu/fp32_grad/adam.cc | 4 +- .../src/litert/kernel/cpu/fp32_grad/adam.h | 4 +- .../kernel/cpu/fp32_grad/adam_weight_decay.cc | 4 +- .../kernel/cpu/fp32_grad/adam_weight_decay.h | 4 +- .../kernel/cpu/fp32_grad/apply_momentum.cc | 4 +- .../kernel/cpu/fp32_grad/apply_momentum.h | 4 +- .../kernel/cpu/fp32_grad/arithmetic_grad.cc | 4 +- .../kernel/cpu/fp32_grad/arithmetic_grad.h | 4 +- .../cpu/fp32_grad/arithmetic_self_grad.cc | 4 +- .../cpu/fp32_grad/arithmetic_self_grad.h | 4 +- .../src/litert/kernel/cpu/fp32_grad/assign.cc | 4 +- .../src/litert/kernel/cpu/fp32_grad/assign.h | 4 +- .../litert/kernel/cpu/fp32_grad/bias_grad.cc | 4 +- .../litert/kernel/cpu/fp32_grad/bias_grad.h | 4 +- .../cpu/fp32_grad/binary_cross_entropy.cc | 4 +- .../cpu/fp32_grad/binary_cross_entropy.h | 4 +- .../fp32_grad/binary_cross_entropy_grad.cc | 4 +- .../cpu/fp32_grad/binary_cross_entropy_grad.h | 4 +- .../litert/kernel/cpu/fp32_grad/bn_grad.cc | 4 +- .../src/litert/kernel/cpu/fp32_grad/bn_grad.h | 4 +- .../kernel/cpu/fp32_grad/convolution.cc | 4 +- .../litert/kernel/cpu/fp32_grad/convolution.h | 4 +- .../cpu/fp32_grad/convolution_grad_filter.cc | 4 +- .../cpu/fp32_grad/convolution_grad_filter.h | 4 +- .../cpu/fp32_grad/convolution_grad_input.cc | 4 +- .../cpu/fp32_grad/convolution_grad_input.h | 4 +- .../fp32_grad/deconvolution_grad_filter.cc | 4 +- .../cpu/fp32_grad/deconvolution_grad_filter.h | 4 +- .../litert/kernel/cpu/fp32_grad/dropout.cc | 4 +- .../src/litert/kernel/cpu/fp32_grad/dropout.h | 4 +- .../kernel/cpu/fp32_grad/dropout_grad.cc | 4 +- .../kernel/cpu/fp32_grad/dropout_grad.h | 4 +- .../kernel/cpu/fp32_grad/layernorm_grad.cc | 4 +- .../kernel/cpu/fp32_grad/layernorm_grad.h | 4 +- .../cpu/fp32_grad/lstm_grad_data_fp32.cc | 4 +- .../cpu/fp32_grad/lstm_grad_data_fp32.h | 4 +- .../kernel/cpu/fp32_grad/lstm_grad_fp32.cc | 4 +- .../kernel/cpu/fp32_grad/lstm_grad_fp32.h | 4 +- .../cpu/fp32_grad/lstm_grad_weight_fp32.cc | 4 +- .../cpu/fp32_grad/lstm_grad_weight_fp32.h | 4 +- .../litert/kernel/cpu/fp32_grad/make_tuple.h | 4 +- 
.../litert/kernel/cpu/fp32_grad/neg_grad.cc | 4 +- .../litert/kernel/cpu/fp32_grad/neg_grad.h | 4 +- .../kernel/cpu/fp32_grad/nllloss_grad.cc | 4 +- .../kernel/cpu/fp32_grad/nllloss_grad.h | 4 +- .../kernel/cpu/fp32_grad/pooling_grad.cc | 4 +- .../kernel/cpu/fp32_grad/pooling_grad.h | 4 +- .../litert/kernel/cpu/fp32_grad/power_grad.cc | 4 +- .../litert/kernel/cpu/fp32_grad/power_grad.h | 4 +- .../kernel/cpu/fp32_grad/resize_grad.cc | 4 +- .../litert/kernel/cpu/fp32_grad/resize_grad.h | 4 +- .../src/litert/kernel/cpu/fp32_grad/sgd.cc | 4 +- .../src/litert/kernel/cpu/fp32_grad/sgd.h | 4 +- .../sigmoid_cross_entropy_with_logits.cc | 4 +- .../sigmoid_cross_entropy_with_logits.h | 4 +- .../sigmoid_cross_entropy_with_logits_grad.cc | 4 +- .../sigmoid_cross_entropy_with_logits_grad.h | 4 +- .../kernel/cpu/fp32_grad/smooth_l1_loss.cc | 4 +- .../kernel/cpu/fp32_grad/smooth_l1_loss.h | 4 +- .../cpu/fp32_grad/smooth_l1_loss_grad.cc | 4 +- .../cpu/fp32_grad/smooth_l1_loss_grad.h | 4 +- .../softmax_cross_entropy_with_logits.cc | 4 +- .../softmax_cross_entropy_with_logits.h | 4 +- .../kernel/cpu/fp32_grad/softmax_grad.cc | 4 +- .../kernel/cpu/fp32_grad/softmax_grad.h | 4 +- ...parse_softmax_cross_entropy_with_logits.cc | 4 +- ...sparse_softmax_cross_entropy_with_logits.h | 4 +- .../cpu/fp32_grad/strided_slice_grad.cc | 4 +- .../kernel/cpu/fp32_grad/strided_slice_grad.h | 4 +- .../cpu/fp32_grad/unsorted_segment_sum.cc | 4 +- .../cpu/fp32_grad/unsorted_segment_sum.h | 4 +- .../cpu/fp32_sparse/matmul_sparse_fp32.cc | 4 +- .../cpu/fp32_sparse/matmul_sparse_fp32.h | 4 +- .../litert/kernel/cpu/int8/activation_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/add_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/add_int8.h | 4 +- .../litert/kernel/cpu/int8/argminmax_int8.cc | 4 +- .../litert/kernel/cpu/int8/argminmax_int8.h | 4 +- .../litert/kernel/cpu/int8/arithmetic_int8.cc | 4 +- .../litert/kernel/cpu/int8/arithmetic_int8.h | 4 +- .../kernel/cpu/int8/arithmetic_self_int8.cc | 4 +- .../kernel/cpu/int8/arithmetic_self_int8.h | 4 +- .../kernel/cpu/int8/batch_to_space_int8.cc | 4 +- .../kernel/cpu/int8/batch_to_space_int8.h | 4 +- .../litert/kernel/cpu/int8/batchnorm_int8.cc | 4 +- .../litert/kernel/cpu/int8/batchnorm_int8.h | 4 +- .../src/litert/kernel/cpu/int8/concat_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/concat_int8.h | 4 +- .../kernel/cpu/int8/convolution_1x1_int8.cc | 4 +- .../kernel/cpu/int8/convolution_1x1_int8.h | 4 +- .../kernel/cpu/int8/convolution_3x3_int8.cc | 4 +- .../kernel/cpu/int8/convolution_3x3_int8.h | 4 +- .../int8/convolution_depthwise_3x3_int8.cc | 4 +- .../cpu/int8/convolution_depthwise_3x3_int8.h | 4 +- .../cpu/int8/convolution_depthwise_int8.cc | 4 +- .../cpu/int8/convolution_depthwise_int8.h | 4 +- .../convolution_depthwise_slidewindow_int8.cc | 4 +- .../convolution_depthwise_slidewindow_int8.h | 4 +- .../kernel/cpu/int8/convolution_int8.cc | 4 +- .../litert/kernel/cpu/int8/convolution_int8.h | 4 +- .../cpu/int8/convolution_int8_creator.cc | 4 +- .../cpu/int8/convolution_int8_creator.h | 2 +- .../src/litert/kernel/cpu/int8/crop_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/crop_int8.h | 4 +- .../cpu/int8/deconvolution_depthwise_int8.cc | 4 +- .../cpu/int8/deconvolution_depthwise_int8.h | 4 +- .../kernel/cpu/int8/deconvolution_int8.cc | 4 +- .../kernel/cpu/int8/deconvolution_int8.h | 4 +- .../kernel/cpu/int8/depth_to_space_int8.cc | 4 +- .../kernel/cpu/int8/depth_to_space_int8.h | 4 +- .../cpu/int8/detection_post_process_int8.cc | 4 +- .../cpu/int8/detection_post_process_int8.h | 4 +- 
.../src/litert/kernel/cpu/int8/div_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/div_int8.h | 4 +- .../kernel/cpu/int8/dynamic_gather_int8.cc | 4 +- .../kernel/cpu/int8/dynamic_gather_int8.h | 4 +- .../litert/kernel/cpu/int8/dynamic_quant.cc | 4 +- .../litert/kernel/cpu/int8/dynamic_quant.h | 4 +- .../kernel/cpu/int8/fullconnection_int8.cc | 4 +- .../kernel/cpu/int8/fullconnection_int8.h | 4 +- .../litert/kernel/cpu/int8/gatherNd_int8.cc | 4 +- .../litert/kernel/cpu/int8/gatherNd_int8.h | 4 +- .../src/litert/kernel/cpu/int8/gather_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/gather_int8.h | 4 +- .../kernel/cpu/int8/group_convolution_int8.cc | 4 +- .../kernel/cpu/int8/group_convolution_int8.h | 4 +- .../src/litert/kernel/cpu/int8/hswish_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/hswish_int8.h | 4 +- .../litert/kernel/cpu/int8/l2_norm_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/l2_norm_int8.h | 4 +- .../litert/kernel/cpu/int8/layer_norm_int8.cc | 4 +- .../litert/kernel/cpu/int8/layer_norm_int8.h | 4 +- .../litert/kernel/cpu/int8/leaky_relu_int8.cc | 4 +- .../litert/kernel/cpu/int8/leaky_relu_int8.h | 4 +- .../kernel/cpu/int8/matmul_base_int8.cc | 4 +- .../litert/kernel/cpu/int8/matmul_base_int8.h | 4 +- .../cpu/int8/matmul_dynamic_base_int8.cc | 4 +- .../cpu/int8/matmul_dynamic_base_int8.h | 4 +- .../kernel/cpu/int8/matmul_dynamic_int8.cc | 4 +- .../kernel/cpu/int8/matmul_dynamic_int8.h | 4 +- .../cpu/int8/matmul_dynamic_sdot_int8.cc | 4 +- .../cpu/int8/matmul_dynamic_sdot_int8.h | 4 +- .../src/litert/kernel/cpu/int8/matmul_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/matmul_int8.h | 4 +- .../src/litert/kernel/cpu/int8/mul_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/mul_int8.h | 4 +- .../src/litert/kernel/cpu/int8/pad_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/pad_int8.h | 4 +- .../litert/kernel/cpu/int8/pooling_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/pooling_int8.h | 4 +- .../src/litert/kernel/cpu/int8/power_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/power_int8.h | 4 +- .../src/litert/kernel/cpu/int8/reduce_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/reduce_int8.h | 4 +- .../src/litert/kernel/cpu/int8/relux_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/relux_int8.h | 4 +- .../litert/kernel/cpu/int8/reshape_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/reshape_int8.h | 4 +- .../src/litert/kernel/cpu/int8/resize_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/resize_int8.h | 4 +- .../src/litert/kernel/cpu/int8/scale_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/scale_int8.h | 4 +- .../litert/kernel/cpu/int8/sigmoid_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/sigmoid_int8.h | 4 +- .../src/litert/kernel/cpu/int8/slice_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/slice_int8.h | 4 +- .../litert/kernel/cpu/int8/softmax_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/softmax_int8.h | 4 +- .../kernel/cpu/int8/space_to_batch_int8.cc | 4 +- .../kernel/cpu/int8/space_to_batch_int8.h | 4 +- .../src/litert/kernel/cpu/int8/split_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/split_int8.h | 4 +- .../litert/kernel/cpu/int8/squeeze_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/squeeze_int8.h | 4 +- .../src/litert/kernel/cpu/int8/sub_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/sub_int8.h | 4 +- .../src/litert/kernel/cpu/int8/tanh_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/tanh_int8.h | 4 +- .../src/litert/kernel/cpu/int8/topk_int8.cc | 4 +- .../src/litert/kernel/cpu/int8/topk_int8.h | 4 +- .../litert/kernel/cpu/int8/transpose_int8.cc | 4 +- .../litert/kernel/cpu/int8/transpose_int8.h 
| 4 +- .../litert/kernel/cpu/int8/unsqueeze_int8.cc | 4 +- .../litert/kernel/cpu/int8/unsqueeze_int8.h | 4 +- .../src/litert/kernel/cpu/nnacl/cxx_utils.cc | 4 +- .../src/litert/kernel/cpu/nnacl/cxx_utils.h | 4 +- .../kernel/cpu/nnacl/nnacl_batchnorm.cc | 4 +- .../litert/kernel/cpu/nnacl/nnacl_batchnorm.h | 4 +- .../kernel/cpu/nnacl/nnacl_convolution.cc | 4 +- .../kernel/cpu/nnacl/nnacl_convolution.h | 4 +- .../cpu/nnacl/nnacl_fused_batch_norm.cc | 4 +- .../kernel/cpu/nnacl/nnacl_fused_batch_norm.h | 4 +- .../litert/kernel/cpu/nnacl/nnacl_kernel.cc | 4 +- .../litert/kernel/cpu/nnacl/nnacl_kernel.h | 4 +- .../litert/kernel/cpu/nnacl/nnacl_manager.cc | 4 +- .../litert/kernel/cpu/nnacl/nnacl_manager.h | 4 +- .../litert/kernel/cpu/nnacl/nnacl_matmul.cc | 4 +- .../litert/kernel/cpu/nnacl/nnacl_matmul.h | 4 +- .../cpu/nnacl/nnacl_non_max_suppression.cc | 4 +- .../cpu/nnacl/nnacl_non_max_suppression.h | 4 +- .../litert/kernel/cpu/nnacl/nnacl_reduce.cc | 4 +- .../litert/kernel/cpu/nnacl/nnacl_reduce.h | 4 +- .../litert/kernel/cpu/nnacl/nnacl_reshape.cc | 4 +- .../litert/kernel/cpu/nnacl/nnacl_reshape.h | 4 +- .../kernel/cpu/nnacl/nnacl_strided_slice.cc | 4 +- .../kernel/cpu/nnacl/nnacl_strided_slice.h | 4 +- .../litert/kernel/cpu/nnacl/nnacl_where.cc | 4 +- .../src/litert/kernel/cpu/nnacl/nnacl_where.h | 4 +- .../kernel/cpu/string/extract_feature.cc | 4 +- .../kernel/cpu/string/extract_feature.h | 4 +- .../kernel/cpu/string/hashtable_lookup.cc | 4 +- .../kernel/cpu/string/hashtable_lookup.h | 4 +- .../kernel/cpu/string/lsh_projection.cc | 4 +- .../litert/kernel/cpu/string/lsh_projection.h | 4 +- .../src/litert/kernel/cpu/string/normalize.cc | 4 +- .../src/litert/kernel/cpu/string/normalize.h | 4 +- .../src/litert/kernel/cpu/string/predict.cc | 4 +- .../src/litert/kernel/cpu/string/predict.h | 4 +- .../src/litert/kernel/cpu/string/skip_gram.cc | 4 +- .../src/litert/kernel/cpu/string/skip_gram.h | 4 +- .../litert/kernel/opencl/kernel/activation.cc | 4 +- .../litert/kernel/opencl/kernel/activation.h | 4 +- .../litert/kernel/opencl/kernel/argminmax.cc | 4 +- .../litert/kernel/opencl/kernel/argminmax.h | 4 +- .../litert/kernel/opencl/kernel/arithmetic.cc | 4 +- .../litert/kernel/opencl/kernel/arithmetic.h | 4 +- .../kernel/opencl/kernel/arithmetic_self.cc | 4 +- .../kernel/opencl/kernel/arithmetic_self.h | 4 +- .../kernel/opencl/kernel/batch_to_space_nd.cc | 4 +- .../kernel/opencl/kernel/batch_to_space_nd.h | 4 +- .../litert/kernel/opencl/kernel/batchnorm.cc | 4 +- .../litert/kernel/opencl/kernel/batchnorm.h | 4 +- .../src/litert/kernel/opencl/kernel/cast.cc | 4 +- .../src/litert/kernel/opencl/kernel/cast.h | 4 +- .../src/litert/kernel/opencl/kernel/concat.cc | 4 +- .../src/litert/kernel/opencl/kernel/concat.h | 4 +- .../src/litert/kernel/opencl/kernel/conv2d.cc | 4 +- .../src/litert/kernel/opencl/kernel/conv2d.h | 4 +- .../kernel/opencl/kernel/conv2d_transpose.cc | 4 +- .../kernel/opencl/kernel/conv2d_transpose.h | 4 +- .../src/litert/kernel/opencl/kernel/crop.cc | 4 +- .../src/litert/kernel/opencl/kernel/crop.h | 4 +- .../kernel/opencl/kernel/depthwise_conv2d.cc | 4 +- .../kernel/opencl/kernel/depthwise_conv2d.h | 4 +- .../src/litert/kernel/opencl/kernel/fill.cc | 4 +- .../src/litert/kernel/opencl/kernel/fill.h | 4 +- .../kernel/opencl/kernel/fullconnection.cc | 4 +- .../kernel/opencl/kernel/fullconnection.h | 4 +- .../kernel/opencl/kernel/fusion_eltwise.cc | 4 +- .../kernel/opencl/kernel/fusion_eltwise.h | 4 +- .../src/litert/kernel/opencl/kernel/gather.cc | 4 +- 
.../src/litert/kernel/opencl/kernel/gather.h | 4 +- .../litert/kernel/opencl/kernel/gl_to_cl.cc | 4 +- .../litert/kernel/opencl/kernel/gl_to_cl.h | 4 +- .../opencl/kernel/int8/arithmetic_int8.cc | 4 +- .../opencl/kernel/int8/arithmetic_int8.h | 4 +- .../litert/kernel/opencl/kernel/layer_norm.cc | 4 +- .../litert/kernel/opencl/kernel/layer_norm.h | 4 +- .../src/litert/kernel/opencl/kernel/matmul.cc | 4 +- .../src/litert/kernel/opencl/kernel/matmul.h | 4 +- .../litert/kernel/opencl/kernel/one_hot.cc | 4 +- .../src/litert/kernel/opencl/kernel/one_hot.h | 4 +- .../src/litert/kernel/opencl/kernel/pad.cc | 4 +- .../src/litert/kernel/opencl/kernel/pad.h | 4 +- .../litert/kernel/opencl/kernel/pooling2d.cc | 4 +- .../litert/kernel/opencl/kernel/pooling2d.h | 4 +- .../src/litert/kernel/opencl/kernel/power.cc | 4 +- .../src/litert/kernel/opencl/kernel/power.h | 4 +- .../src/litert/kernel/opencl/kernel/prelu.cc | 4 +- .../src/litert/kernel/opencl/kernel/prelu.h | 4 +- .../src/litert/kernel/opencl/kernel/reduce.cc | 4 +- .../src/litert/kernel/opencl/kernel/reduce.h | 4 +- .../litert/kernel/opencl/kernel/reshape.cc | 4 +- .../src/litert/kernel/opencl/kernel/reshape.h | 4 +- .../src/litert/kernel/opencl/kernel/resize.cc | 4 +- .../src/litert/kernel/opencl/kernel/resize.h | 4 +- .../src/litert/kernel/opencl/kernel/scale.cc | 4 +- .../src/litert/kernel/opencl/kernel/scale.h | 4 +- .../litert/kernel/opencl/kernel/softmax.cc | 4 +- .../src/litert/kernel/opencl/kernel/softmax.h | 4 +- .../kernel/opencl/kernel/space_to_batch_nd.cc | 4 +- .../kernel/opencl/kernel/space_to_batch_nd.h | 4 +- .../kernel/opencl/kernel/space_to_depth.cc | 4 +- .../kernel/opencl/kernel/space_to_depth.h | 4 +- .../kernel/opencl/kernel/sparse_to_dense.cc | 4 +- .../kernel/opencl/kernel/sparse_to_dense.h | 4 +- .../src/litert/kernel/opencl/kernel/split.cc | 4 +- .../src/litert/kernel/opencl/kernel/split.h | 4 +- .../src/litert/kernel/opencl/kernel/stack.cc | 4 +- .../src/litert/kernel/opencl/kernel/stack.h | 4 +- .../litert/kernel/opencl/kernel/strassen.cc | 4 +- .../litert/kernel/opencl/kernel/strassen.h | 4 +- .../kernel/opencl/kernel/strided_slice.cc | 4 +- .../kernel/opencl/kernel/strided_slice.h | 4 +- .../litert/kernel/opencl/kernel/to_format.cc | 4 +- .../litert/kernel/opencl/kernel/to_format.h | 4 +- .../litert/kernel/opencl/kernel/transpose.cc | 4 +- .../litert/kernel/opencl/kernel/transpose.h | 4 +- .../litert/kernel/opencl/kernel/winograd.cc | 4 +- .../litert/kernel/opencl/kernel/winograd.h | 4 +- .../src/litert/kernel/opencl/opencl_fusion.cc | 4 +- .../src/litert/kernel/opencl/opencl_kernel.cc | 4 +- .../src/litert/kernel/opencl/opencl_kernel.h | 4 +- .../litert/kernel/opencl/opencl_subgraph.cc | 4 +- .../litert/kernel/opencl/opencl_subgraph.h | 4 +- .../src/litert/kernel/opencl/utils.cc | 4 +- .../src/litert/kernel/opencl/utils.h | 4 +- mindspore-lite/src/litert/kernel_exec_util.cc | 4 +- mindspore-lite/src/litert/kernel_exec_util.h | 4 +- mindspore-lite/src/litert/lite_kernel.cc | 4 +- mindspore-lite/src/litert/lite_kernel.h | 4 +- mindspore-lite/src/litert/model_manager.cc | 4 +- mindspore-lite/src/litert/model_manager.h | 4 +- .../src/litert/runtime_allocator.cc | 4 +- mindspore-lite/src/litert/runtime_allocator.h | 4 +- .../src/registry/kernel_interface_registry.cc | 4 +- .../src/registry/kernel_interface_registry.h | 4 +- .../src/registry/register_kernel.cc | 4 +- .../src/registry/register_kernel_impl.cc | 4 +- .../src/registry/register_kernel_impl.h | 4 +- .../src/registry/register_kernel_interface.cc | 4 +- 
mindspore-lite/src/train/loss_kernel.h | 4 +- mindspore-lite/src/train/opt_allocator.cc | 4 +- mindspore-lite/src/train/opt_allocator.h | 4 +- .../train/optimizer/common/fusion_utils.cc | 4 +- .../src/train/optimizer/common/fusion_utils.h | 4 +- mindspore-lite/src/train/optimizer_kernel.h | 4 +- mindspore-lite/src/train/static_allocator.h | 4 +- .../src/train/train_populate_parameter.cc | 4 +- .../src/train/train_populate_parameter.h | 4 +- .../src/train/train_populate_parameter_v0.h | 4 +- mindspore-lite/test/common/common_test.cc | 4 +- mindspore-lite/test/main.cc | 2 +- .../test/st/mindrt_parallel_test.cc | 4 +- mindspore-lite/test/st/mix_data_type_test.cc | 4 +- mindspore-lite/test/st/sub_graph_test.cc | 12 +- .../test/ut/nnacl/infer/adam_infer_test.cc | 4 +- .../infer/adam_weight_decay_infer_test.cc | 4 +- .../test/ut/nnacl/infer/addn_infer_test.cc | 4 +- .../nnacl/infer/apply_momentum_infer_test.cc | 4 +- .../test/ut/nnacl/infer/argmax_infer_test.cc | 4 +- .../test/ut/nnacl/infer/argmin_infer_test.cc | 4 +- .../infer/arithmetic_compare_infer_test.cc | 4 +- .../ut/nnacl/infer/arithmetic_infer_test.cc | 4 +- .../ut/nnacl/infer/assign_add_infer_test.cc | 4 +- .../test/ut/nnacl/infer/assign_infer_test.cc | 4 +- .../infer/audio_spectrogram_infer_test.cc | 4 +- .../nnacl/infer/batch_to_space_infer_test.cc | 4 +- .../ut/nnacl/infer/bias_grad_infer_test.cc | 4 +- .../infer/binary_cross_entropy_infer_test.cc | 4 +- .../test/ut/nnacl/infer/bn_grad_infer_test.cc | 4 +- .../ut/nnacl/infer/broadcast_to_infer_test.cc | 4 +- .../test/ut/nnacl/infer/cast_infer_test.cc | 4 +- .../test/ut/nnacl/infer/concat_infer_test.cc | 4 +- .../infer/constant_of_shape_infer_test.cc | 4 +- .../infer/conv2d_grad_filter_infer_test.cc | 4 +- .../infer/conv2d_grad_input_infer_test.cc | 4 +- .../test/ut/nnacl/infer/conv2d_infer_test.cc | 4 +- .../nnacl/infer/crop_and_resize_infer_test.cc | 4 +- .../test/ut/nnacl/infer/crop_infer_test.cc | 4 +- .../test/ut/nnacl/infer/cumsum_infer_test.cc | 4 +- .../custom_extract_features_infer_test.cc | 4 +- .../infer/custom_normalize_infer_test.cc | 4 +- .../nnacl/infer/custom_predict_infer_test.cc | 4 +- .../ut/nnacl/infer/deconv2d_infer_test.cc | 4 +- .../nnacl/infer/depth_to_space_infer_test.cc | 4 +- .../infer/depthwise_conv2d_infer_test.cc | 4 +- .../detection_post_process_infer_test.cc | 4 +- .../ut/nnacl/infer/dropout_grad_infer_test.cc | 4 +- .../infer/embedding_lookup_infer_test.cc | 4 +- .../ut/nnacl/infer/expand_dims_infer_test.cc | 4 +- .../ut/nnacl/infer/fft_imag_infer_test.cc | 4 +- .../test/ut/nnacl/infer/fill_infer_test.cc | 4 +- .../ut/nnacl/infer/flatten_grad_infer_test.cc | 4 +- .../test/ut/nnacl/infer/flatten_infer_test.cc | 4 +- .../nnacl/infer/full_connection_infer_test.cc | 4 +- .../nnacl/infer/fused_batchnorm_infer_test.cc | 4 +- .../test/ut/nnacl/infer/gather_infer_test.cc | 4 +- .../ut/nnacl/infer/gather_nd_infer_test.cc | 4 +- .../group_conv2d_grad_input_infer_test.cc | 4 +- .../test/ut/nnacl/infer/gru_infer_test.cc | 4 +- .../infer/hashtable_lookup_infer_test.cc | 4 +- .../infer/invert_permutation_infer_test.cc | 4 +- .../ut/nnacl/infer/layer_norm_infer_test.cc | 4 +- .../nnacl/infer/lsh_projection_infer_test.cc | 4 +- .../test/ut/nnacl/infer/lstm_infer_test.cc | 4 +- .../test/ut/nnacl/infer/matmul_infer_test.cc | 4 +- .../ut/nnacl/infer/max_min_grad_infer_test.cc | 4 +- .../test/ut/nnacl/infer/mfcc_infer_test.cc | 4 +- .../ut/nnacl/infer/nllloss_grad_infer_test.cc | 4 +- .../test/ut/nnacl/infer/nllloss_infer_test.cc | 4 +- 
.../test/ut/nnacl/infer/one_hot_infer_test.cc | 4 +- .../test/ut/nnacl/infer/pad_infer_test.cc | 4 +- .../ut/nnacl/infer/pooling_grad_infer_test.cc | 4 +- .../test/ut/nnacl/infer/pooling_infer_test.cc | 4 +- .../test/ut/nnacl/infer/power_infer_test.cc | 4 +- .../infer/quant_dtype_cast_infer_test.cc | 4 +- .../random_standard_normal_infer_test.cc | 4 +- .../test/ut/nnacl/infer/range_infer_test.cc | 4 +- .../test/ut/nnacl/infer/rank_infer_test.cc | 4 +- .../test/ut/nnacl/infer/reduce_infer_test.cc | 4 +- .../test/ut/nnacl/infer/reshape_infer_test.cc | 4 +- .../test/ut/nnacl/infer/resize_infer_test.cc | 4 +- .../test/ut/nnacl/infer/rfft_infer_test.cc | 4 +- .../ut/nnacl/infer/roi_pooling_infer_test.cc | 4 +- .../nnacl/infer/scatter_nd_add_infer_test.cc | 4 +- .../ut/nnacl/infer/scatter_nd_infer_test.cc | 4 +- .../test/ut/nnacl/infer/select_infer_test.cc | 4 +- .../test/ut/nnacl/infer/sgd_infer_test.cc | 4 +- .../test/ut/nnacl/infer/shape_infer_test.cc | 4 +- .../test/ut/nnacl/infer/size_infer_test.cc | 4 +- .../ut/nnacl/infer/skip_gram_infer_test.cc | 4 +- .../test/ut/nnacl/infer/slice_infer_test.cc | 4 +- .../infer/softmax_cross_entropy_infer_test.cc | 4 +- .../test/ut/nnacl/infer/softmax_infer_test.cc | 4 +- .../nnacl/infer/space_to_batch_infer_test.cc | 4 +- .../infer/space_to_batch_nd_infer_test.cc | 4 +- .../nnacl/infer/space_to_depth_infer_test.cc | 4 +- .../nnacl/infer/sparse_to_dense_infer_test.cc | 4 +- .../test/ut/nnacl/infer/split_infer_test.cc | 4 +- .../test/ut/nnacl/infer/squeeze_infer_test.cc | 4 +- .../test/ut/nnacl/infer/stack_infer_test.cc | 4 +- .../nnacl/infer/strided_slice_infer_test.cc | 4 +- .../infer/tensorlist_fromtensor_infer_test.cc | 4 +- .../infer/tensorlist_getitem_infer_test.cc | 4 +- .../infer/tensorlist_reserve_infer_test.cc | 4 +- .../infer/tensorlist_setitem_infer_test.cc | 4 +- .../infer/tensorlist_stack_infer_test.cc | 4 +- .../test/ut/nnacl/infer/tile_infer_test.cc | 4 +- .../test/ut/nnacl/infer/topk_infer_test.cc | 4 +- .../ut/nnacl/infer/transpose_infer_test.cc | 4 +- .../test/ut/nnacl/infer/unique_infer_test.cc | 4 +- .../infer/unsorted_segment_sum_infer_test.cc | 4 +- .../ut/nnacl/infer/unsqueeze_infer_test.cc | 4 +- .../test/ut/nnacl/infer/unstack_infer_test.cc | 4 +- .../test/ut/nnacl/infer/where_infer_test.cc | 4 +- .../nnacl/int8/quant_dtype_cast_int8_test.cc | 4 +- .../test/ut/nnacl/kernel/cast_test.cc | 4 +- .../test/ut/src/api/context_c_test.cc | 4 +- .../test/ut/src/api/generic_api_test.cc | 4 +- .../test/ut/src/api/model_c_test.cc | 4 +- .../test/ut/src/api/model_group_test.cc | 4 +- .../ut/src/api/model_parallel_runner_test.cc | 4 +- .../test/ut/src/api/tensor_c_test.cc | 4 +- .../ut/src/dynamic_library_loader_test.cc | 86 +- mindspore-lite/test/ut/src/graph_test.cc | 4 +- mindspore-lite/test/ut/src/infer_test.cc | 4 +- .../test/ut/src/lite_mindrt_test.cc | 4 +- .../src/registry/registry_custom_op_test.cc | 4 +- .../registry/registry_gpu_custom_op_test.cc | 4 +- .../test/ut/src/registry/registry_test.cc | 4 +- .../src/runtime/dynamic_mem_manager_test.cc | 4 +- .../runtime/kernel/arm/common/pack_tests.cc | 4 +- .../kernel/arm/common/strided_slice_tests.cc | 4 +- .../runtime/kernel/arm/cxx_api/model_test.cc | 4 +- .../kernel/arm/cxx_api/serialization_test.cc | 4 +- .../fp16_grad/activation_grad_fp16_test.cc | 4 +- .../arithmetic_fp16_self_grad_tests.cc | 4 +- .../arm/fp32-sparsity/matmul_fp32_tests.cc | 4 +- .../kernel/arm/fp32/activation_fp32_test.cc | 4 +- .../arm/fp32/batch_to_space_fp32_test.cc | 4 +- 
.../kernel/arm/fp32/batchnorm_fp32_tests.cc | 4 +- .../arm/fp32/constant_of_shape_fp32_test.cc | 4 +- .../kernel/arm/fp32/conv1x1_fp32_tests.cc | 4 +- .../fp32/convolution_depthwise_fp32_tests.cc | 4 +- .../kernel/arm/fp32/convolution_fp32_tests.cc | 4 +- .../runtime/kernel/arm/fp32/crop_fp32_test.cc | 4 +- .../runtime/kernel/arm/fp32/cumsum_tests.cc | 4 +- .../arm/fp32/deconvolution_fp32_tests.cc | 4 +- .../arm/fp32/depth_to_space_fp32_test.cc | 4 +- .../arm/fp32/detection_post_process_test.cc | 4 +- .../arm/fp32/embedding_lookup_fp32_test.cc | 4 +- .../arm/fp32/fullconnection_fp32_tests.cc | 4 +- .../kernel/arm/fp32/l2norm_fp32_test.cc | 4 +- .../kernel/arm/fp32/logicalor_fp32_test.cc | 4 +- .../arm/fp32/lsh_projection_fp32_tests.cc | 4 +- .../kernel/arm/fp32/lstm_fp32_tests.cc | 4 +- .../kernel/arm/fp32/matmul_fp32_tests.cc | 4 +- .../kernel/arm/fp32/nllloss_fp32_test.cc | 4 +- .../fp32/non_max_suppression_fp32_tests.cc | 4 +- .../kernel/arm/fp32/one_hot_fp32_test.cc | 4 +- .../kernel/arm/fp32/power_fp32_tests.cc | 4 +- .../arm/fp32/ragged_range_fp32_tests.cc | 4 +- .../kernel/arm/fp32/reduce_fp32_tests.cc | 4 +- .../arm/fp32/resize_bilinear_fp32_tests.cc | 4 +- .../resize_nearest_neighbor_fp32_tests.cc | 4 +- .../arm/fp32/reverse_sequence_fp32_tests.cc | 4 +- .../kernel/arm/fp32/roi_pooling_fp32_tests.cc | 4 +- .../kernel/arm/fp32/scale_fp32_tests.cc | 4 +- .../arm/fp32/scatter_nd_add_fp32_test.cc | 4 +- .../kernel/arm/fp32/scatter_nd_fp32_tests.cc | 4 +- .../runtime/kernel/arm/fp32/skip_gram_fp32.cc | 4 +- .../runtime/kernel/arm/fp32/softmax_tests.cc | 4 +- .../arm/fp32/space_to_batch_fp32_tests.cc | 4 +- .../arm/fp32/space_to_depth_fp32_tests.cc | 4 +- .../arm/fp32/sparse_to_dense_fp32_tests.cc | 4 +- .../kernel/arm/fp32/stack_fp32_test.cc | 4 +- .../kernel/arm/fp32/tile_fp32_tests.cc | 4 +- .../kernel/arm/fp32/topk_fp32_tests.cc | 4 +- .../kernel/arm/fp32/transpose_fp32_tests.cc | 4 +- .../kernel/arm/fp32/uniform_real_fp32_test.cc | 4 +- .../kernel/arm/fp32/unique_fp32_tests.cc | 4 +- .../kernel/arm/fp32/unstack_fp32_tests.cc | 4 +- .../fp32_grad/activation_grad_fp32_tests.cc | 4 +- .../fp32_grad/arithmetic_grad_fp32_tests.cc | 4 +- .../arm/fp32_grad/bias_grad_fp32_tests.cc | 4 +- .../kernel/arm/fp32_grad/bn_grad_fp32_test.cc | 4 +- .../fp32_grad/convolution_grad_fp32_tests.cc | 4 +- .../deconvolution_grad_fp32_tests.cc | 4 +- .../kernel/arm/fp32_grad/network_test.cc | 4 +- .../arm/fp32_grad/nllloss_grad_fp32_test.cc | 4 +- .../arm/fp32_grad/pooling_grad_fp32_tests.cc | 4 +- .../softmax_crossentropy_fp32_tests.cc | 4 +- .../arm/fp32_grad/softmax_grad_fp32_tests.cc | 4 +- .../runtime/kernel/arm/int8/add_int8_tests.cc | 4 +- .../arm/int8/arithmetic_self_int8_tests.cc | 4 +- .../kernel/arm/int8/batchnorm_int8_test.cc | 4 +- .../kernel/arm/int8/concat_int8_tests.cc | 4 +- .../kernel/arm/int8/conv_1x1_int8_tests.cc | 4 +- .../kernel/arm/int8/crop_int8_tests.cc | 4 +- .../kernel/arm/int8/deconv_int8_tests.cc | 4 +- .../arm/int8/fullconnection_int8_tests.cc | 4 +- .../kernel/arm/int8/gatherNd_int8_test.cc | 4 +- .../kernel/arm/int8/gather_int8_test.cc | 4 +- .../kernel/arm/int8/hswish_int8_tests.cc | 4 +- .../kernel/arm/int8/l2_norm_int8_tests.cc | 4 +- .../kernel/arm/int8/matmul_int8_tests.cc | 4 +- .../runtime/kernel/arm/int8/mul_int8_tests.cc | 4 +- .../runtime/kernel/arm/int8/pad_int8_tests.cc | 4 +- .../kernel/arm/int8/power_int8_tests.cc | 4 +- .../kernel/arm/int8/prelu_int8_tests.cc | 4 +- .../kernel/arm/int8/quant_dtype_cast_tests.cc | 4 +- .../kernel/arm/int8/reduce_int8_tests.cc 
| 4 +- .../kernel/arm/int8/relux_int8_tests.cc | 4 +- .../kernel/arm/int8/reshape_int8_tests.cc | 4 +- .../arm/int8/resize_bilinear_int8_tests.cc | 4 +- .../resize_nearest_neighbor_int8_tests.cc | 4 +- .../src/runtime/kernel/arm/int8/scale_int8.cc | 4 +- .../kernel/arm/int8/sigmoid_int8_tests.cc | 4 +- .../kernel/arm/int8/slice_int8_tests.cc | 4 +- .../kernel/arm/int8/softmax_int8_tests.cc | 4 +- .../arm/int8/space_to_batch_int8_tests.cc | 4 +- .../kernel/arm/int8/split_int8_tests.cc | 4 +- .../kernel/arm/int8/squeeze_int8_tests.cc | 4 +- .../runtime/kernel/arm/int8/sub_int_tests.cc | 4 +- .../kernel/arm/int8/topk_int8_tests.cc | 4 +- .../kernel/arm/int8/unsqueeze_int8_tests.cc | 4 +- .../runtime/kernel/arm/string/normalize.cc | 4 +- .../src/runtime/kernel/common_utils_test.cc | 4 +- .../runtime/kernel/cuda/batchtospace_tests.cc | 4 +- .../ut/src/runtime/kernel_executor_tests.cc | 4 +- .../ut/src/runtime/runtime_convert_tests.cc | 4 +- mindspore-lite/test/ut/src/utils_test.cc | 132 +- .../acl/mapper/activation_mapper_test.cc | 4 +- .../acl/mapper/argmax_fusion_mapper_test.cc | 4 +- .../acl/mapper/argmin_fusion_mapper_test.cc | 4 +- .../acl/mapper/arithmetic_mapper_test.cc | 4 +- .../adapter/acl/mapper/clip_mapper_test.cc | 4 +- .../acl/mapper/fused_batchnorm_mapper_test.cc | 4 +- .../onnx/onnx_layer_norm_parser_test.cc | 4 +- .../tflite/tflite_activation_parser_test.cc | 4 +- .../parser/tflite/tflite_addn_parser_test.cc | 4 +- .../tflite/tflite_argmax_parser_test.cc | 4 +- .../tflite/tflite_argmin_parser_test.cc | 4 +- .../tflite/tflite_arithmetic_parser_test.cc | 4 +- .../tflite_batch_to_space_nd_parser_test.cc | 4 +- .../parser/tflite/tflite_cast_parser_test.cc | 4 +- .../tflite/tflite_concat_parser_test.cc | 4 +- .../parser/tflite/tflite_conv_parser_test.cc | 4 +- .../tflite/tflite_deconv_parser_test.cc | 4 +- .../tflite_depth_to_space_parser_test.cc | 4 +- .../tflite_depthwise_conv_parser_test.cc | 4 +- .../parser/tflite/tflite_fill_parser_test.cc | 4 +- .../tflite/tflite_gather_nd_parser_test.cc | 4 +- .../tflite/tflite_gather_parser_test.cc | 4 +- .../tflite/tflite_l2norm_parser_test.cc | 4 +- .../tflite/tflite_logical_parser_test.cc | 4 +- .../parser/tflite/tflite_lrn_parser_test.cc | 4 +- .../tflite/tflite_one_hot_parser_test.cc | 4 +- .../parser/tflite/tflite_pad_parser_test.cc | 4 +- .../tflite/tflite_parsers_test_utils.cc | 4 +- .../parser/tflite/tflite_parsers_test_utils.h | 4 +- .../tflite/tflite_pooling_parser_test.cc | 4 +- .../tflite/tflite_reduce_parser_test.cc | 4 +- .../tflite/tflite_reshape_parser_test.cc | 4 +- .../tflite/tflite_resize_parser_test.cc | 4 +- .../tflite/tflite_reverse_parser_test.cc | 4 +- .../tflite_reverse_sequence_parser_test.cc | 4 +- .../parser/tflite/tflite_slice_parser_test.cc | 4 +- .../tflite/tflite_softmax_parser_test.cc | 4 +- .../tflite_space_to_batch_nd_parser_test.cc | 4 +- .../tflite_space_to_depth_parser_test.cc | 4 +- .../tflite_sparse_to_dense_parser_test.cc | 4 +- .../parser/tflite/tflite_split_parser_test.cc | 4 +- .../tflite/tflite_split_v_parser_test.cc | 4 +- .../parser/tflite/tflite_stack_parser_test.cc | 4 +- .../tflite_strided_slice_parser_test.cc | 4 +- .../parser/tflite/tflite_tile_parser_test.cc | 4 +- .../tflite/tflite_topk_v2_parser_test.cc | 4 +- .../tflite/tflite_transpose_parser_test.cc | 4 +- .../tflite/tflite_unique_parser_test.cc | 4 +- .../tflite/tflite_unstack_parser_test.cc | 4 +- .../registry/model_parser_registry_test.cc | 4 +- .../registry/node_parser_registry_test.cc | 4 +- 
.../registry/parser/model_parser_test.cc | 4 +- .../registry/parser/model_parser_test.h | 4 +- .../registry/parser/node_parser_test.cc | 4 +- .../registry/parser/node_parser_test.h | 4 +- .../registry/pass_registry_position_ascend.cc | 4 +- .../converter/registry/pass_registry_test.cc | 4 +- .../fusion/activation_fusion_test.cc | 4 +- .../fusion/add_concat_act_fusion_test.cc | 4 +- .../fusion/constant_folding_fusion_test.cc | 4 +- .../fusion/conv_activation_fusion_test.cc | 4 +- .../fusion/conv_biasadd_fusion_test.cc | 4 +- .../optimizer/fusion/conv_bn_fusion_test.cc | 4 +- .../fusion/conv_scale_fusion_test.cc | 4 +- .../activation_fusion_inout_test.cc | 4 +- .../add_concat_act_fusion_inout_test.cc | 4 +- .../conv_act_fusion_inout_test.cc | 4 +- .../conv_bias_fusion_inout_test.cc | 4 +- .../conv_fusion_inout_test.cc | 4 +- .../conv_fusion_inout_test.h | 4 +- .../fusion_inout_test/fusion_inout_test.cc | 4 +- .../fusion_inout_test/fusion_inout_test.h | 4 +- .../matmul_act_fusion_inout_test.cc | 4 +- .../matmul_fusion_inout_test.cc | 4 +- .../matmul_fusion_inout_test.h | 4 +- .../matmul_mul_fusion_inout_test.cc | 4 +- .../trans_matmul_fusion_inout_test.cc | 4 +- .../fusion/matmul_mul_fusion_test.cc | 4 +- .../fusion/trans_matmul_fusion_test.cc | 4 +- .../tools/benchmark/benchmark_c_api.cc | 4 +- .../tools/benchmark/benchmark_c_api.h | 4 +- .../tools/common/custom_ascend_utils.cc | 4 +- .../tools/common/custom_ascend_utils.h | 4 +- .../tools/common/func_graph_utils.cc | 4 +- .../tools/common/func_graph_utils.h | 4 +- mindspore-lite/tools/common/opengl_util.cc | 4 +- mindspore-lite/tools/common/opengl_util.h | 4 +- .../tools/converter/adapter/acl/acl_pass.cc | 4 +- .../tools/converter/adapter/acl/acl_pass.h | 4 +- .../acl/cxx_api_lite/cxx_api/acl_utils.h | 4 +- .../acl/cxx_api_lite/cxx_api/any_utils.cc | 4 +- .../acl/cxx_api_lite/cxx_api/any_utils.h | 4 +- .../acl/cxx_api_lite/cxx_api/context.cc | 4 +- .../acl/cxx_api_lite/cxx_api/dlutils.h | 4 +- .../graph/acl/acl_convert_init_adapter.cc | 6 +- .../graph/acl/acl_convert_init_adapter.h | 4 +- .../cxx_api_lite/cxx_api/graph/graph_data.cc | 4 +- .../cxx_api_lite/cxx_api/graph/graph_data.h | 4 +- .../cxx_api/model/acl/acl_model_options.cc | 4 +- .../cxx_api/model/acl/acl_model_options.h | 4 +- .../cxx_api/model/acl/model_converter.cc | 4 +- .../cxx_api/model/acl/model_converter.h | 4 +- .../cxx_api/model/aoe/auto_tune_process.cc | 4 +- .../cxx_api/model/aoe/auto_tune_process.h | 4 +- .../acl/cxx_api_lite/cxx_api/serialization.cc | 4 +- .../acl/infer/flash_attention_infer.cc | 8 +- .../adapter/acl/infer/flash_attention_infer.h | 4 +- .../acl/infer/forward_rasterize_infer.cc | 8 +- .../acl/infer/forward_rasterize_infer.h | 4 +- .../adapter/acl/plugin/acl_pass_plugin.cc | 4 +- .../adapter/acl/plugin/acl_pass_plugin.h | 4 +- .../acl/src/acl_custom_opp_installer.cc | 4 +- .../acl/src/acl_custom_opp_installer.h | 4 +- .../acl/src/acl_memory_offload_pass_impl.cc | 4 +- .../acl/src/acl_memory_offload_pass_impl.h | 4 +- .../adapter/acl/src/acl_pass_impl.cc | 4 +- .../converter/adapter/acl/src/acl_pass_impl.h | 4 +- .../dpico/checker/activation_checker.cc | 4 +- .../dpico/checker/activation_checker.h | 4 +- .../adapter/dpico/checker/argmax_checker.cc | 4 +- .../adapter/dpico/checker/argmax_checker.h | 4 +- .../dpico/checker/arithmetic_checker.cc | 4 +- .../dpico/checker/arithmetic_checker.h | 4 +- .../dpico/checker/batchnorm_checker.cc | 4 +- .../adapter/dpico/checker/batchnorm_checker.h | 4 +- .../adapter/dpico/checker/common_checker.cc | 4 +- 
.../adapter/dpico/checker/common_checker.h | 4 +- .../adapter/dpico/checker/concat_checker.cc | 4 +- .../adapter/dpico/checker/concat_checker.h | 4 +- .../adapter/dpico/checker/conv2d_checker.cc | 4 +- .../adapter/dpico/checker/conv2d_checker.h | 4 +- .../dpico/checker/custom_op_checker.cc | 4 +- .../adapter/dpico/checker/custom_op_checker.h | 4 +- .../adapter/dpico/checker/eltwise_checker.cc | 4 +- .../adapter/dpico/checker/eltwise_checker.h | 4 +- .../adapter/dpico/checker/exp_checker.cc | 4 +- .../adapter/dpico/checker/exp_checker.h | 4 +- .../adapter/dpico/checker/flatten_checker.cc | 4 +- .../adapter/dpico/checker/flatten_checker.h | 4 +- .../dpico/checker/full_connection_checker.cc | 4 +- .../dpico/checker/full_connection_checker.h | 4 +- .../adapter/dpico/checker/log_checker.cc | 4 +- .../adapter/dpico/checker/log_checker.h | 4 +- .../adapter/dpico/checker/lrn_checker.cc | 4 +- .../adapter/dpico/checker/lrn_checker.h | 4 +- .../adapter/dpico/checker/lstm_checker.cc | 4 +- .../adapter/dpico/checker/lstm_checker.h | 4 +- .../adapter/dpico/checker/mat_mul_checker.cc | 4 +- .../adapter/dpico/checker/mat_mul_checker.h | 4 +- .../adapter/dpico/checker/mvn_checker.cc | 4 +- .../adapter/dpico/checker/mvn_checker.h | 4 +- .../adapter/dpico/checker/op_checker.cc | 4 +- .../adapter/dpico/checker/op_checker.h | 4 +- .../adapter/dpico/checker/pooling_checker.cc | 4 +- .../adapter/dpico/checker/pooling_checker.h | 4 +- .../adapter/dpico/checker/pow_checker.cc | 4 +- .../adapter/dpico/checker/pow_checker.h | 4 +- .../adapter/dpico/checker/reduce_checker.cc | 4 +- .../adapter/dpico/checker/reduce_checker.h | 4 +- .../adapter/dpico/checker/reshape_checker.cc | 4 +- .../adapter/dpico/checker/reshape_checker.h | 4 +- .../adapter/dpico/checker/resize_checker.cc | 4 +- .../adapter/dpico/checker/resize_checker.h | 4 +- .../adapter/dpico/checker/reverse_checker.cc | 4 +- .../adapter/dpico/checker/reverse_checker.h | 4 +- .../adapter/dpico/checker/scale_checker.cc | 4 +- .../adapter/dpico/checker/scale_checker.h | 4 +- .../adapter/dpico/checker/slice_checker.cc | 4 +- .../adapter/dpico/checker/slice_checker.h | 4 +- .../adapter/dpico/checker/softmax_checker.cc | 4 +- .../adapter/dpico/checker/softmax_checker.h | 4 +- .../adapter/dpico/checker/split_checker.cc | 4 +- .../adapter/dpico/checker/split_checker.h | 4 +- .../adapter/dpico/checker/spp_checker.cc | 4 +- .../adapter/dpico/checker/spp_checker.h | 4 +- .../adapter/dpico/checker/squeeze_checker.cc | 4 +- .../adapter/dpico/checker/squeeze_checker.h | 4 +- .../dpico/checker/strided_slice_checker.cc | 4 +- .../dpico/checker/strided_slice_checker.h | 4 +- .../dpico/checker/transpose_checker.cc | 4 +- .../adapter/dpico/checker/transpose_checker.h | 4 +- .../adapter/dpico/common/anf_util.cc | 8 +- .../converter/adapter/dpico/common/anf_util.h | 4 +- .../dpico/common/data_transpose_utils.cc | 4 +- .../dpico/common/data_transpose_utils.h | 4 +- .../adapter/dpico/common/fetch_content.cc | 4 +- .../adapter/dpico/common/fetch_content.h | 4 +- .../adapter/dpico/common/file_util.cc | 4 +- .../adapter/dpico/common/file_util.h | 4 +- .../converter/adapter/dpico/common/float16.h | 4 +- .../adapter/dpico/common/format_utils.cc | 4 +- .../adapter/dpico/common/format_utils.h | 4 +- .../dpico/common/graph_output_name_keeper.cc | 4 +- .../dpico/common/graph_output_name_keeper.h | 4 +- .../adapter/dpico/common/infer_util.cc | 4 +- .../adapter/dpico/common/infer_util.h | 4 +- .../converter/adapter/dpico/common/op_attr.h | 4 +- .../converter/adapter/dpico/common/op_enum.h | 4 
+- .../adapter/dpico/common/string_util.cc | 4 +- .../adapter/dpico/common/string_util.h | 4 +- .../adapter/dpico/infer/dpico_common_infer.cc | 4 +- .../adapter/dpico/infer/dpico_common_infer.h | 4 +- .../adapter/dpico/infer/dpico_custom_infer.cc | 4 +- .../adapter/dpico/infer/dpico_custom_infer.h | 4 +- .../dpico/infer/dpico_decbbox_infer.cc | 4 +- .../adapter/dpico/infer/dpico_decbbox_infer.h | 4 +- .../infer/dpico_detection_output_infer.cc | 4 +- .../infer/dpico_detection_output_infer.h | 4 +- .../dpico/infer/dpico_extract_infer.cc | 4 +- .../adapter/dpico/infer/dpico_extract_infer.h | 4 +- .../adapter/dpico/infer/dpico_lstm_infer.cc | 4 +- .../adapter/dpico/infer/dpico_lstm_infer.h | 4 +- .../dpico/infer/dpico_lstm_onnx_infer.cc | 4 +- .../dpico/infer/dpico_lstm_onnx_infer.h | 4 +- .../dpico/infer/dpico_maxunpool_infer.cc | 4 +- .../dpico/infer/dpico_maxunpool_infer.h | 4 +- .../dpico/infer/dpico_passthrough_infer.cc | 4 +- .../dpico/infer/dpico_passthrough_infer.h | 4 +- .../dpico/infer/dpico_psroi_pool_infer.cc | 4 +- .../dpico/infer/dpico_psroi_pool_infer.h | 4 +- .../dpico/infer/dpico_recurrent_infer.cc | 4 +- .../dpico/infer/dpico_recurrent_infer.h | 4 +- .../dpico/infer/dpico_roi_align_infer.cc | 4 +- .../dpico/infer/dpico_roi_align_infer.h | 4 +- .../adapter/dpico/infer/dpico_spp_infer.cc | 4 +- .../adapter/dpico/infer/dpico_spp_infer.h | 4 +- .../dpico/infer/dpico_upsample_infer.cc | 4 +- .../dpico/infer/dpico_upsample_infer.h | 4 +- .../dpico/legacy_ops/bi_lstm_mapper.cc | 4 +- .../adapter/dpico/legacy_ops/bi_lstm_mapper.h | 4 +- .../adapter/dpico/legacy_ops/nop_mapper.cc | 4 +- .../adapter/dpico/legacy_ops/nop_mapper.h | 4 +- .../dpico/legacy_ops/reverse_mapper.cc | 4 +- .../adapter/dpico/legacy_ops/reverse_mapper.h | 4 +- .../dpico/legacy_ops/roi_align_mapper.cc | 4 +- .../dpico/legacy_ops/roi_align_mapper.h | 4 +- .../adapter/dpico/legacy_ops/spp_mapper.cc | 4 +- .../adapter/dpico/legacy_ops/spp_mapper.h | 4 +- .../adapter/dpico/mapper/abs_mapper.cc | 4 +- .../adapter/dpico/mapper/abs_mapper.h | 4 +- .../adapter/dpico/mapper/acos_mapper.cc | 4 +- .../adapter/dpico/mapper/acos_mapper.h | 4 +- .../adapter/dpico/mapper/acosh_mapper.cc | 4 +- .../adapter/dpico/mapper/acosh_mapper.h | 4 +- .../adapter/dpico/mapper/activation_mapper.cc | 4 +- .../adapter/dpico/mapper/activation_mapper.h | 4 +- .../adapter/dpico/mapper/argmax_mapper.cc | 4 +- .../adapter/dpico/mapper/argmax_mapper.h | 4 +- .../adapter/dpico/mapper/arithmetic_mapper.cc | 4 +- .../adapter/dpico/mapper/arithmetic_mapper.h | 4 +- .../adapter/dpico/mapper/asinh_mapper.cc | 4 +- .../adapter/dpico/mapper/asinh_mapper.h | 4 +- .../adapter/dpico/mapper/atanh_mapper.cc | 4 +- .../adapter/dpico/mapper/atanh_mapper.h | 4 +- .../adapter/dpico/mapper/batch_norm_mapper.cc | 4 +- .../adapter/dpico/mapper/batch_norm_mapper.h | 4 +- .../adapter/dpico/mapper/bias_mapper.cc | 4 +- .../adapter/dpico/mapper/bias_mapper.h | 4 +- .../adapter/dpico/mapper/bitshift_mapper.cc | 4 +- .../adapter/dpico/mapper/bitshift_mapper.h | 4 +- .../adapter/dpico/mapper/bnll_mapper.cc | 4 +- .../adapter/dpico/mapper/bnll_mapper.h | 4 +- .../adapter/dpico/mapper/cast_mapper.cc | 4 +- .../adapter/dpico/mapper/cast_mapper.h | 4 +- .../adapter/dpico/mapper/clip_mapper.cc | 4 +- .../adapter/dpico/mapper/clip_mapper.h | 4 +- .../adapter/dpico/mapper/concat_mapper.cc | 4 +- .../adapter/dpico/mapper/concat_mapper.h | 4 +- .../adapter/dpico/mapper/conv_mapper.cc | 4 +- .../adapter/dpico/mapper/conv_mapper.h | 4 +- .../adapter/dpico/mapper/cosh_mapper.cc | 4 +- 
.../adapter/dpico/mapper/cosh_mapper.h | 4 +- .../adapter/dpico/mapper/crop_mapper.cc | 4 +- .../adapter/dpico/mapper/crop_mapper.h | 4 +- .../adapter/dpico/mapper/custom_mapper.cc | 4 +- .../adapter/dpico/mapper/custom_mapper.h | 4 +- .../adapter/dpico/mapper/decbbox_mapper.cc | 4 +- .../adapter/dpico/mapper/decbbox_mapper.h | 4 +- .../adapter/dpico/mapper/deconv_mapper.cc | 4 +- .../adapter/dpico/mapper/deconv_mapper.h | 4 +- .../dpico/mapper/detection_output_mapper.cc | 4 +- .../dpico/mapper/detection_output_mapper.h | 4 +- .../adapter/dpico/mapper/eltwise_mapper.cc | 4 +- .../adapter/dpico/mapper/eltwise_mapper.h | 4 +- .../adapter/dpico/mapper/exp_mapper.cc | 4 +- .../adapter/dpico/mapper/exp_mapper.h | 4 +- .../adapter/dpico/mapper/extract_mapper.cc | 4 +- .../adapter/dpico/mapper/extract_mapper.h | 4 +- .../adapter/dpico/mapper/fc_mapper.cc | 4 +- .../adapter/dpico/mapper/fc_mapper.h | 4 +- .../adapter/dpico/mapper/flatten_mapper.cc | 4 +- .../adapter/dpico/mapper/flatten_mapper.h | 4 +- .../adapter/dpico/mapper/gather_mapper.cc | 4 +- .../adapter/dpico/mapper/gather_mapper.h | 4 +- .../dpico/mapper/gatherelements_mapper.cc | 4 +- .../dpico/mapper/gatherelements_mapper.h | 4 +- .../dpico/mapper/greaterorequal_mapper.cc | 4 +- .../dpico/mapper/greaterorequal_mapper.h | 4 +- .../adapter/dpico/mapper/gru_mapper.cc | 4 +- .../adapter/dpico/mapper/gru_mapper.h | 4 +- .../adapter/dpico/mapper/hardmax_mapper.cc | 4 +- .../adapter/dpico/mapper/hardmax_mapper.h | 4 +- .../adapter/dpico/mapper/hardsigmod_mapper.cc | 4 +- .../adapter/dpico/mapper/hardsigmod_mapper.h | 4 +- .../adapter/dpico/mapper/interp_mapper.cc | 4 +- .../adapter/dpico/mapper/interp_mapper.h | 4 +- .../dpico/mapper/lessorequal_mapper.cc | 4 +- .../adapter/dpico/mapper/lessorequal_mapper.h | 4 +- .../adapter/dpico/mapper/log_mapper.cc | 4 +- .../adapter/dpico/mapper/log_mapper.h | 4 +- .../adapter/dpico/mapper/lrn_mapper.cc | 4 +- .../adapter/dpico/mapper/lrn_mapper.h | 4 +- .../adapter/dpico/mapper/lstm_mapper.cc | 4 +- .../adapter/dpico/mapper/lstm_mapper.h | 4 +- .../adapter/dpico/mapper/mat_mul_mapper.cc | 4 +- .../adapter/dpico/mapper/mat_mul_mapper.h | 4 +- .../adapter/dpico/mapper/maxunpool_mapper.cc | 4 +- .../adapter/dpico/mapper/maxunpool_mapper.h | 4 +- .../adapter/dpico/mapper/mish_mapper.cc | 4 +- .../adapter/dpico/mapper/mish_mapper.h | 4 +- .../adapter/dpico/mapper/mod_mapper.cc | 4 +- .../adapter/dpico/mapper/mod_mapper.h | 4 +- .../adapter/dpico/mapper/mvn_mapper.cc | 4 +- .../adapter/dpico/mapper/mvn_mapper.h | 4 +- .../adapter/dpico/mapper/normalize_mapper.cc | 4 +- .../adapter/dpico/mapper/normalize_mapper.h | 4 +- .../adapter/dpico/mapper/op_mapper.cc | 4 +- .../adapter/dpico/mapper/op_mapper.h | 4 +- .../dpico/mapper/op_mapper_registry.cc | 4 +- .../adapter/dpico/mapper/op_mapper_registry.h | 4 +- .../adapter/dpico/mapper/pad_mapper.cc | 4 +- .../adapter/dpico/mapper/pad_mapper.h | 4 +- .../dpico/mapper/passthrough_mapper.cc | 4 +- .../adapter/dpico/mapper/passthrough_mapper.h | 4 +- .../adapter/dpico/mapper/permute_mapper.cc | 4 +- .../adapter/dpico/mapper/permute_mapper.h | 4 +- .../adapter/dpico/mapper/pool_mapper.cc | 4 +- .../adapter/dpico/mapper/pool_mapper.h | 4 +- .../adapter/dpico/mapper/power_mapper.cc | 4 +- .../adapter/dpico/mapper/power_mapper.h | 4 +- .../adapter/dpico/mapper/prelu_mapper.cc | 4 +- .../adapter/dpico/mapper/prelu_mapper.h | 4 +- .../adapter/dpico/mapper/psroi_pool_mapper.cc | 4 +- .../adapter/dpico/mapper/psroi_pool_mapper.h | 4 +- 
.../adapter/dpico/mapper/reduction_mapper.cc | 4 +- .../adapter/dpico/mapper/reduction_mapper.h | 4 +- .../adapter/dpico/mapper/reshape_mapper.cc | 4 +- .../adapter/dpico/mapper/reshape_mapper.h | 4 +- .../adapter/dpico/mapper/resize_mapper.cc | 4 +- .../adapter/dpico/mapper/resize_mapper.h | 4 +- .../adapter/dpico/mapper/rnn_mapper.cc | 4 +- .../adapter/dpico/mapper/rnn_mapper.h | 4 +- .../adapter/dpico/mapper/roi_pool_mapper.cc | 4 +- .../adapter/dpico/mapper/roi_pool_mapper.h | 4 +- .../adapter/dpico/mapper/scale_mapper.cc | 4 +- .../adapter/dpico/mapper/scale_mapper.h | 4 +- .../adapter/dpico/mapper/shape_mapper.cc | 4 +- .../adapter/dpico/mapper/shape_mapper.h | 4 +- .../adapter/dpico/mapper/shrink_mapper.cc | 4 +- .../adapter/dpico/mapper/shrink_mapper.h | 4 +- .../dpico/mapper/shuffle_channel_mapper.cc | 4 +- .../dpico/mapper/shuffle_channel_mapper.h | 4 +- .../adapter/dpico/mapper/sinh_mapper.cc | 4 +- .../adapter/dpico/mapper/sinh_mapper.h | 4 +- .../adapter/dpico/mapper/slice_mapper.cc | 4 +- .../adapter/dpico/mapper/slice_mapper.h | 4 +- .../adapter/dpico/mapper/softmax_mapper.cc | 4 +- .../adapter/dpico/mapper/softmax_mapper.h | 4 +- .../adapter/dpico/mapper/softsign_mapper.cc | 4 +- .../adapter/dpico/mapper/softsign_mapper.h | 4 +- .../adapter/dpico/mapper/sqrt_mapper.cc | 4 +- .../adapter/dpico/mapper/sqrt_mapper.h | 4 +- .../adapter/dpico/mapper/squeeze_mapper.cc | 4 +- .../adapter/dpico/mapper/squeeze_mapper.h | 4 +- .../dpico/mapper/strided_slice_mapper.cc | 4 +- .../dpico/mapper/strided_slice_mapper.h | 4 +- .../adapter/dpico/mapper/threshold_mapper.cc | 4 +- .../adapter/dpico/mapper/threshold_mapper.h | 4 +- .../adapter/dpico/mapper/tile_mapper.cc | 4 +- .../adapter/dpico/mapper/tile_mapper.h | 4 +- .../adapter/dpico/mapper/unsqueeze_mapper.cc | 4 +- .../adapter/dpico/mapper/unsqueeze_mapper.h | 4 +- .../adapter/dpico/mapper/upsample_mapper.cc | 4 +- .../adapter/dpico/mapper/upsample_mapper.h | 4 +- .../adapter/dpico/mapper/xor_mapper.cc | 4 +- .../adapter/dpico/mapper/xor_mapper.h | 4 +- .../parser/detection_output_param_helper.cc | 4 +- .../parser/detection_output_param_helper.h | 4 +- .../adapter/dpico/src/calib_data_generator.cc | 4 +- .../adapter/dpico/src/calib_data_generator.h | 4 +- .../adapter/dpico/src/custom_creator.cc | 4 +- .../adapter/dpico/src/custom_creator.h | 4 +- .../adapter/dpico/src/data_preprocessor.cc | 4 +- .../adapter/dpico/src/data_preprocessor.h | 4 +- .../converter/adapter/dpico/src/dpico_pass.cc | 8 +- .../converter/adapter/dpico/src/dpico_pass.h | 4 +- .../dpico/src/dpico_preprocess_pass.cc | 6 +- .../adapter/dpico/src/dpico_preprocess_pass.h | 4 +- .../adapter/dpico/src/graph_split_api.cc | 4 +- .../adapter/dpico/src/graph_split_api.h | 4 +- .../adapter/dpico/src/graph_split_info.h | 4 +- .../adapter/dpico/src/mapper_config_parser.cc | 4 +- .../adapter/dpico/src/mapper_config_parser.h | 4 +- .../adapter/dpico/src/om_generator.cc | 4 +- .../adapter/dpico/src/om_generator.h | 4 +- .../tools/converter/converter_context.cc | 4 +- .../converter_lite/converter_flags.cc | 4 +- .../converter_lite/converter_flags.h | 4 +- .../tools/converter/cxx_api/converter_para.h | 4 +- .../tools/converter/import/cast_op_adjust.cc | 4 +- .../tools/converter/import/cast_op_adjust.h | 4 +- .../convert_extend_ops_pass.cc | 4 +- .../convert_extend_ops_pass.h | 4 +- .../import/convert_extend_ops/dense.cc | 4 +- .../import/convert_extend_ops/matmul_ext.cc | 4 +- .../import/convert_extend_ops/max_min.cc | 4 +- .../import/convert_extend_ops/muls.cc | 4 +- 
.../import/convert_extend_ops/ones.cc | 4 +- .../import/convert_extend_ops/sum_ext.cc | 4 +- .../import/convert_extend_ops/utils.cc | 4 +- .../import/convert_extend_ops/utils.h | 4 +- .../import/convert_extend_ops/zeros.cc | 4 +- .../micro/providers/nnie/nnie_interfaces.cc | 4 +- .../micro/providers/nnie/nnie_interfaces.h | 4 +- .../micro/providers/nnie/nnie_micro.cc | 4 +- .../converter/parser/lstm_adjust_pass.cc | 4 +- .../tools/converter/parser/lstm_adjust_pass.h | 4 +- .../parser/pytorch/pytorch_lstm_adjust.cc | 4 +- .../parser/pytorch/pytorch_lstm_adjust.h | 4 +- .../converter/parser/tf/functionalize_cond.cc | 4 +- .../converter/parser/tf/functionalize_cond.h | 4 +- .../tf/functionalize_control_op_pass.cc | 4 +- .../parser/tf/functionalize_control_op_pass.h | 4 +- .../parser/tf/functionalize_while.cc | 4 +- .../converter/parser/tf/functionalize_while.h | 4 +- .../parser/tf_bidirection_gru_cf_fusion.cc | 4 +- .../parser/tf_bidirection_gru_cf_fusion.h | 4 +- .../parser/unused_node_remove_pass.cc | 4 +- .../parser/unused_node_remove_pass.h | 4 +- .../converter/quantizer/gptq_quantizer.h | 4 +- .../registry/model_parser_registry.cc | 4 +- .../registry/node_parser_registry.cc | 4 +- .../tools/converter/registry/pass_registry.cc | 4 +- .../tools/optimizer/common/format_utils.cc | 4 +- .../tools/optimizer/common/format_utils.h | 4 +- .../tools/optimizer/common/gllo_utils.cc | 4 +- .../tools/optimizer/common/gllo_utils.h | 4 +- .../tools/optimizer/common/helper.cc | 4 +- .../tools/optimizer/common/helper.h | 4 +- .../common/multiple_pattern_process_pass.cc | 4 +- .../common/multiple_pattern_process_pass.h | 4 +- .../optimizer/common/node_pass_extends.cc | 4 +- .../optimizer/common/node_pass_extends.h | 4 +- .../optimizer/common/pass_manager_extends.cc | 4 +- .../optimizer/common/pass_manager_extends.h | 4 +- .../common/pattern_process_pass_extends.cc | 4 +- .../common/pattern_process_pass_extends.h | 4 +- .../const_fold/constant_folding_fusion.h | 4 +- .../const_fold/fold_along_infershape.cc | 4 +- .../const_fold/fold_along_infershape.h | 4 +- .../tools/optimizer/const_fold/fold_utils.cc | 4 +- .../tools/optimizer/const_fold/fold_utils.h | 4 +- .../const_fold/fold_with_infershape.cc | 4 +- .../const_fold/fold_with_infershape.h | 4 +- .../tools/optimizer/const_fold/rsqrt_fp32.cc | 4 +- .../tools/optimizer/const_fold/rsqrt_fp32.h | 4 +- .../fisson/eliminate_concat_split.cc | 4 +- .../optimizer/fisson/eliminate_concat_split.h | 4 +- .../tools/optimizer/fisson/fisson_util.cc | 4 +- .../tools/optimizer/fisson/fisson_util.h | 4 +- .../optimizer/fisson/iter_node_outputs.cc | 4 +- .../optimizer/fisson/iter_node_outputs.h | 4 +- .../optimizer/fisson/multi_conv_split_pass.cc | 4 +- .../optimizer/fisson/multi_conv_split_pass.h | 4 +- .../tools/optimizer/fisson/node_out_shapes.cc | 4 +- .../tools/optimizer/fisson/node_out_shapes.h | 4 +- .../format/delete_redundant_transpose.cc | 4 +- .../format/delete_redundant_transpose.h | 4 +- .../tools/optimizer/format/to_format_base.cc | 4 +- .../tools/optimizer/format/to_format_base.h | 4 +- .../tools/optimizer/format/to_nchw_format.cc | 4 +- .../tools/optimizer/format/to_nchw_format.h | 4 +- .../tools/optimizer/format/to_nhwc_format.cc | 4 +- .../tools/optimizer/format/to_nhwc_format.h | 4 +- .../optimizer/fusion/activation_fusion.cc | 4 +- .../optimizer/fusion/activation_fusion.h | 4 +- .../optimizer/fusion/add_activation_fusion.cc | 4 +- .../optimizer/fusion/add_activation_fusion.h | 4 +- .../fusion/add_concat_activation_fusion.cc | 4 +- 
.../fusion/add_concat_activation_fusion.h | 4 +- .../optimizer/fusion/add_layernorm_fusion.cc | 4 +- .../optimizer/fusion/add_layernorm_fusion.h | 4 +- .../optimizer/fusion/add_stream_label_pass.cc | 4 +- .../optimizer/fusion/add_stream_label_pass.h | 4 +- .../optimizer/fusion/adjust_col2im_pass.cc | 4 +- .../optimizer/fusion/adjust_col2im_pass.h | 4 +- .../fusion/adjust_controlflow_pass.cc | 4 +- .../fusion/adjust_controlflow_pass.h | 4 +- .../optimizer/fusion/adjust_matmul_pass.cc | 4 +- .../optimizer/fusion/adjust_matmul_pass.h | 4 +- .../fusion/adjust_resize_dims_pass.cc | 4 +- .../fusion/adjust_resize_dims_pass.h | 4 +- .../fusion/affine_activation_fusion.cc | 4 +- .../fusion/affine_activation_fusion.h | 4 +- .../tools/optimizer/fusion/affine_fusion.cc | 4 +- .../tools/optimizer/fusion/affine_fusion.h | 4 +- ...tiquant_add_mul_matmul_allreduce_fusion.cc | 4 +- ...ntiquant_add_mul_matmul_allreduce_fusion.h | 4 +- .../optimizer/fusion/batchmatmul_fusion.cc | 4 +- .../optimizer/fusion/batchmatmul_fusion.h | 4 +- .../fusion/batchnorm_to_scale_fusion.cc | 4 +- .../fusion/batchnorm_to_scale_fusion.h | 4 +- .../tools/optimizer/fusion/cast_fusion.cc | 4 +- .../tools/optimizer/fusion/cast_fusion.h | 4 +- .../optimizer/fusion/concat_concat_fusion.cc | 4 +- .../optimizer/fusion/concat_concat_fusion.h | 4 +- .../fusion/conv_activation_fusion.cc | 4 +- .../optimizer/fusion/conv_activation_fusion.h | 4 +- .../optimizer/fusion/conv_biasadd_fusion.cc | 4 +- .../optimizer/fusion/conv_biasadd_fusion.h | 4 +- .../tools/optimizer/fusion/conv_bn_fusion.cc | 4 +- .../tools/optimizer/fusion/conv_bn_fusion.h | 4 +- .../optimizer/fusion/conv_conv_fusion.cc | 4 +- .../tools/optimizer/fusion/conv_conv_fusion.h | 4 +- .../tools/optimizer/fusion/conv_pad_fusion.cc | 4 +- .../tools/optimizer/fusion/conv_pad_fusion.h | 4 +- .../optimizer/fusion/conv_scale_fusion.cc | 4 +- .../optimizer/fusion/conv_scale_fusion.h | 4 +- .../optimizer/fusion/conv_transform_fusion.cc | 4 +- .../optimizer/fusion/conv_transform_fusion.h | 4 +- .../fusion/conv_tuple_activation_fusion.cc | 4 +- .../fusion/conv_tuple_activation_fusion.h | 4 +- .../fusion/conv_tuplegetitem_fusion.cc | 4 +- .../fusion/conv_tuplegetitem_fusion.h | 4 +- .../optimizer/fusion/decoder_layer_fusion.cc | 4 +- .../optimizer/fusion/decoder_layer_fusion.h | 4 +- .../optimizer/fusion/encoder_layer_fusion.cc | 4 +- .../optimizer/fusion/encoder_layer_fusion.h | 4 +- .../fusion/expanddims_reshape_fusion.cc | 4 +- .../fusion/expanddims_reshape_fusion.h | 4 +- .../optimizer/fusion/ffn_antiquant_fusion.cc | 4 +- .../optimizer/fusion/ffn_antiquant_fusion.h | 4 +- .../tools/optimizer/fusion/ffn_custom_pass.cc | 4 +- .../tools/optimizer/fusion/ffn_custom_pass.h | 4 +- .../tools/optimizer/fusion/ffn_fusion.cc | 4 +- .../tools/optimizer/fusion/ffn_fusion.h | 4 +- .../flash_attention_antiquant_fusion.cc | 4 +- .../fusion/flash_attention_antiquant_fusion.h | 4 +- .../fusion/flash_attention_fusion.cc | 4 +- .../optimizer/fusion/flash_attention_fusion.h | 4 +- .../flash_attention_fusion_for_custom.cc | 4 +- .../flash_attention_fusion_for_custom.h | 4 +- .../fusion/flash_attention_tik_fusion.cc | 4 +- .../fusion/flash_attention_tik_fusion.h | 4 +- .../fusion/fullconnected_add_fusion.cc | 4 +- .../fusion/fullconnected_add_fusion.h | 4 +- .../optimizer/fusion/fullconnected_fusion.cc | 4 +- .../optimizer/fusion/fullconnected_fusion.h | 4 +- .../tools/optimizer/fusion/gegluv2_fusion.cc | 4 +- .../tools/optimizer/fusion/gegluv2_fusion.h | 4 +- .../tools/optimizer/fusion/gelu_fusion.cc | 4 
+- .../tools/optimizer/fusion/gelu_fusion.h | 4 +- .../tools/optimizer/fusion/glu_fusion.cc | 4 +- .../tools/optimizer/fusion/glu_fusion.h | 4 +- .../tools/optimizer/fusion/gnbmm_pass.cc | 4 +- .../tools/optimizer/fusion/gnbmm_pass.h | 4 +- .../tools/optimizer/fusion/gnsnz_pass.cc | 4 +- .../tools/optimizer/fusion/gnsnz_pass.h | 4 +- .../optimizer/fusion/graph_split_pass.cc | 4 +- .../tools/optimizer/fusion/graph_split_pass.h | 4 +- .../optimizer/fusion/groupnorm_fusion.cc | 4 +- .../tools/optimizer/fusion/groupnorm_fusion.h | 4 +- .../optimizer/fusion/groupnormsilu_fusion.cc | 4 +- .../optimizer/fusion/groupnormsilu_fusion.h | 4 +- .../optimizer/fusion/hard_swish_fusion.cc | 4 +- .../optimizer/fusion/hard_swish_fusion.h | 4 +- .../fusion/kv_cache_mgr_assign_fusion.cc | 4 +- .../fusion/kv_cache_mgr_assign_fusion.h | 4 +- .../fusion/kv_cache_mgr_concat_fusion.cc | 4 +- .../fusion/kv_cache_mgr_concat_fusion.h | 4 +- .../fusion/kv_cache_mgr_load_fusion.cc | 4 +- .../fusion/kv_cache_mgr_load_fusion.h | 4 +- .../fusion/kv_cache_mgr_one_branch_fusion.cc | 4 +- .../fusion/kv_cache_mgr_one_branch_fusion.h | 4 +- .../optimizer/fusion/leaky_relu_fusion.cc | 4 +- .../optimizer/fusion/leaky_relu_fusion.h | 4 +- .../fusion/matmul_activation_fusion.cc | 4 +- .../fusion/matmul_activation_fusion.h | 4 +- .../optimizer/fusion/matmul_add_fusion.cc | 4 +- .../optimizer/fusion/matmul_add_fusion.h | 4 +- .../fusion/matmul_allreduce_fusion.cc | 4 +- .../fusion/matmul_allreduce_fusion.h | 4 +- .../optimizer/fusion/matmul_mul_fusion.cc | 4 +- .../optimizer/fusion/matmul_mul_fusion.h | 4 +- .../optimizer/fusion/matmul_scale_fusion.cc | 4 +- .../optimizer/fusion/matmul_scale_fusion.h | 4 +- .../optimizer/fusion/mul_activation_fusion.cc | 4 +- .../optimizer/fusion/mul_activation_fusion.h | 4 +- .../tools/optimizer/fusion/mul_add_fusion.cc | 4 +- .../tools/optimizer/fusion/mul_add_fusion.h | 4 +- .../optimizer/fusion/mul_reduce_fusion.cc | 4 +- .../optimizer/fusion/mul_reduce_fusion.h | 4 +- .../fusion/multi_head_attention_fusion.cc | 4 +- .../fusion/multi_head_attention_fusion.h | 4 +- .../tools/optimizer/fusion/norm_fusion.cc | 4 +- .../tools/optimizer/fusion/norm_fusion.h | 4 +- .../optimizer/fusion/onnx_gelu_fusion.cc | 4 +- .../tools/optimizer/fusion/onnx_gelu_fusion.h | 4 +- .../tools/optimizer/fusion/prelu_fusion.cc | 4 +- .../tools/optimizer/fusion/prelu_fusion.h | 4 +- .../fusion/quant_dtype_cast_fusion.cc | 4 +- .../fusion/quant_dtype_cast_fusion.h | 4 +- .../fusion/reduce_same_op_in_horizon.cc | 4 +- .../fusion/reduce_same_op_in_horizon.h | 4 +- .../optimizer/fusion/reduce_stack_fusion.cc | 4 +- .../optimizer/fusion/reduce_stack_fusion.h | 4 +- .../fusion/remove_transitivity_op.cc | 4 +- .../optimizer/fusion/remove_transitivity_op.h | 4 +- .../fusion/reshape_like_operator_ablation.cc | 4 +- .../fusion/reshape_like_operator_ablation.h | 4 +- .../optimizer/fusion/reshape_reduce_fusion.cc | 4 +- .../optimizer/fusion/reshape_reduce_fusion.h | 4 +- .../fusion/reshape_reshape_fusion.cc | 4 +- .../optimizer/fusion/reshape_reshape_fusion.h | 4 +- .../optimizer/fusion/reshape_shape_fusion.cc | 4 +- .../optimizer/fusion/reshape_shape_fusion.h | 4 +- .../fusion/reshape_transpose_fusion.cc | 4 +- .../fusion/reshape_transpose_fusion.h | 4 +- .../tools/optimizer/fusion/resize_fusion.cc | 4 +- .../tools/optimizer/fusion/resize_fusion.h | 4 +- .../fusion/scale_activation_fusion.cc | 4 +- .../fusion/scale_activation_fusion.h | 4 +- .../optimizer/fusion/scale_base_fusion.cc | 4 +- .../optimizer/fusion/scale_base_fusion.h | 
4 +- .../optimizer/fusion/scale_scale_fusion.cc | 4 +- .../optimizer/fusion/scale_scale_fusion.h | 4 +- .../optimizer/fusion/sigmoid_mul_fusion.cc | 4 +- .../optimizer/fusion/sigmoid_mul_fusion.h | 4 +- .../fusion/squeeze_expanddims_fusion.cc | 4 +- .../fusion/squeeze_expanddims_fusion.h | 4 +- .../tools/optimizer/fusion/squeeze_fusion.cc | 4 +- .../tools/optimizer/fusion/squeeze_fusion.h | 4 +- .../optimizer/fusion/strided_slice_checker.cc | 4 +- .../optimizer/fusion/strided_slice_checker.h | 4 +- .../optimizer/fusion/strided_slice_fusion.cc | 4 +- .../optimizer/fusion/strided_slice_fusion.h | 4 +- .../optimizer/fusion/tensor_dot_fusion.cc | 4 +- .../optimizer/fusion/tensor_dot_fusion.h | 4 +- .../fusion/tf_bidirection_gru_fusion.cc | 4 +- .../fusion/tf_bidirection_gru_fusion.h | 4 +- .../tools/optimizer/fusion/tf_gelu_fusion.cc | 4 +- .../tools/optimizer/fusion/tf_gelu_fusion.h | 4 +- .../optimizer/fusion/tf_lstm_cell_fusion.cc | 4 +- .../optimizer/fusion/tf_lstm_cell_fusion.h | 4 +- .../fusion/tflite_lstm_cell_fusion.cc | 4 +- .../fusion/tflite_lstm_cell_fusion.h | 4 +- ...ite_rel_pos_multi_head_attention_fusion.cc | 4 +- ...lite_rel_pos_multi_head_attention_fusion.h | 4 +- .../optimizer/fusion/tile_matmul_fusion.cc | 4 +- .../optimizer/fusion/tile_matmul_fusion.h | 4 +- .../optimizer/fusion/transpose_fusion.cc | 4 +- .../tools/optimizer/fusion/transpose_fusion.h | 4 +- .../fusion/transpose_gather_fusion.cc | 4 +- .../fusion/transpose_gather_fusion.h | 4 +- .../fusion/transpose_matmul_fusion.cc | 4 +- .../fusion/transpose_matmul_fusion.h | 4 +- .../tools/optimizer/graph/add_tensor_array.cc | 4 +- .../tools/optimizer/graph/add_tensor_array.h | 4 +- .../optimizer/graph/add_variable_node_pass.cc | 4 +- .../optimizer/graph/add_variable_node_pass.h | 4 +- .../graph/adjust_ascend_quant_pass.cc | 4 +- .../graph/adjust_ascend_quant_pass.h | 4 +- .../optimizer/graph/args_to_attr_pass.cc | 4 +- .../tools/optimizer/graph/args_to_attr_pass.h | 4 +- .../optimizer/graph/attr_to_args_pass.cc | 4 +- .../tools/optimizer/graph/attr_to_args_pass.h | 4 +- .../optimizer/graph/broadcast_for_select.cc | 4 +- .../optimizer/graph/broadcast_for_select.h | 4 +- .../graph/clip_convert_activation_pass.cc | 4 +- .../graph/clip_convert_activation_pass.h | 4 +- .../tools/optimizer/graph/concat_op_pass.cc | 4 +- .../tools/optimizer/graph/concat_op_pass.h | 4 +- .../optimizer/graph/control_flow_pass.cc | 4 +- .../tools/optimizer/graph/control_flow_pass.h | 4 +- .../optimizer/graph/core_infershape_pass.cc | 4 +- .../optimizer/graph/core_infershape_pass.h | 4 +- .../graph/decrease_transpose_algo.cc | 4 +- .../optimizer/graph/decrease_transpose_algo.h | 4 +- .../tools/optimizer/graph/dump_graph.h | 4 +- .../graph/eliminate_redundant_cast_pass.cc | 4 +- .../graph/eliminate_redundant_cast_pass.h | 4 +- .../graph/group_depthwise_op_convert_pass.cc | 4 +- .../graph/group_depthwise_op_convert_pass.h | 4 +- .../optimizer/graph/grouped_matmul_op_pass.cc | 4 +- .../optimizer/graph/grouped_matmul_op_pass.h | 4 +- .../tools/optimizer/graph/infershape_pass.cc | 4 +- .../tools/optimizer/graph/infershape_pass.h | 4 +- .../graph/input_and_output_variable_pass.cc | 4 +- .../graph/input_and_output_variable_pass.h | 4 +- .../graph/input_data_type_trans_pass.cc | 4 +- .../graph/input_data_type_trans_pass.h | 4 +- .../optimizer/graph/int64_cast_int32_pass.cc | 4 +- .../optimizer/graph/int64_cast_int32_pass.h | 4 +- .../optimizer/graph/kvcache_quant_pass.cc | 4 +- .../optimizer/graph/kvcache_quant_pass.h | 4 +- 
.../optimizer/graph/lite_tensor_extractor.cc | 4 +- .../optimizer/graph/lite_tensor_extractor.h | 4 +- .../tools/optimizer/graph/make_list_pass.cc | 4 +- .../tools/optimizer/graph/make_list_pass.h | 4 +- .../optimizer/graph/miniaturization_pass.cc | 4 +- .../optimizer/graph/miniaturization_pass.h | 4 +- .../optimizer/graph/mul_constant_pass.cc | 4 +- .../tools/optimizer/graph/mul_constant_pass.h | 4 +- .../tools/optimizer/graph/node_infershape.cc | 4 +- .../tools/optimizer/graph/node_infershape.h | 4 +- .../optimizer/graph/output_variable_pass.cc | 4 +- .../optimizer/graph/output_variable_pass.h | 4 +- .../tools/optimizer/graph/padv3_ge_pass.cc | 4 +- .../tools/optimizer/graph/padv3_ge_pass.h | 4 +- .../graph/preprocess_dynamic_shape.cc | 4 +- .../graph/preprocess_dynamic_shape.h | 4 +- .../quant_fusion_x_offset_to_bias_pass.cc | 4 +- .../quant_fusion_x_offset_to_bias_pass.h | 4 +- .../optimizer/graph/reduce_same_act_pass.cc | 4 +- .../optimizer/graph/reduce_same_act_pass.h | 4 +- .../graph/redundant_op_remove_pass.cc | 4 +- .../graph/redundant_op_remove_pass.h | 4 +- .../tools/optimizer/graph/remove_load_pass.cc | 4 +- .../tools/optimizer/graph/remove_load_pass.h | 4 +- .../tools/optimizer/graph/scalar_op_pass.cc | 4 +- .../tools/optimizer/graph/scalar_op_pass.h | 4 +- .../graph/send_op_add_control_depend.cc | 4 +- .../graph/send_op_add_control_depend.h | 4 +- .../optimizer/graph/slice_prepose_pass.cc | 4 +- .../optimizer/graph/slice_prepose_pass.h | 4 +- .../graph/special_node_postprocess.cc | 4 +- .../graph/special_node_postprocess.h | 4 +- .../graph/specify_graph_input_format.cc | 4 +- .../graph/specify_graph_input_format.h | 4 +- .../graph/specify_graph_output_format.cc | 4 +- .../graph/specify_graph_output_format.h | 4 +- .../tools/optimizer/graph/split_one_pass.cc | 4 +- .../tools/optimizer/graph/split_one_pass.h | 4 +- .../graph/split_with_size_op_pass.cc | 4 +- .../optimizer/graph/split_with_size_op_pass.h | 4 +- .../optimizer/graph/transpose_strategy.cc | 4 +- .../optimizer/graph/transpose_strategy.h | 4 +- .../graph/unused_add_node_remove_pass.cc | 4 +- .../graph/unused_add_node_remove_pass.h | 4 +- .../unused_transpose_node_remove_pass.cc | 4 +- .../graph/unused_transpose_node_remove_pass.h | 4 +- .../graph/update_conv2d_param_pass.cc | 4 +- .../graph/update_conv2d_param_pass.h | 4 +- .../tools/optimizer/parallel/conv2d_info.cc | 4 +- .../tools/optimizer/parallel/conv2d_info.h | 4 +- .../parallel/depthwise_conv2d_info.cc | 4 +- .../parallel/depthwise_conv2d_info.h | 4 +- .../optimizer/parallel/multi_conv_info.cc | 4 +- .../optimizer/parallel/multi_conv_info.h | 4 +- .../optimizer/parallel/multi_node_split.cc | 4 +- .../optimizer/parallel/multi_node_split.h | 4 +- .../tools/optimizer/parallel/operator_info.cc | 4 +- .../tools/optimizer/parallel/operator_info.h | 4 +- .../parallel/operator_info_register.cc | 4 +- .../parallel/operator_info_register.h | 4 +- .../tools/optimizer/parallel/parallel_pass.cc | 4 +- .../tools/optimizer/parallel/parallel_pass.h | 4 +- .../optimizer/parallel/split_strategy.cc | 4 +- .../tools/optimizer/parallel/split_strategy.h | 4 +- .../tools/optimizer/parallel/spliter.cc | 4 +- .../tools/optimizer/parallel/spliter.h | 4 +- 2852 files changed, 12390 insertions(+), 12387 deletions(-) mode change 100755 => 100644 mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.cc mode change 100755 => 100644 mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.h mode change 100755 => 100644 
mindspore-lite/minddata/dataset/engine/datasetops/source/caltech_op.cc mode change 100755 => 100644 mindspore-lite/minddata/dataset/engine/datasetops/source/caltech_op.h mode change 100755 => 100644 mindspore-lite/minddata/dataset/engine/ir/datasetops/source/amazon_review_node.cc mode change 100755 => 100644 mindspore-lite/minddata/dataset/engine/ir/datasetops/source/amazon_review_node.h mode change 100755 => 100644 mindspore-lite/minddata/dataset/engine/ir/datasetops/source/caltech256_node.cc mode change 100755 => 100644 mindspore-lite/minddata/dataset/engine/ir/datasetops/source/caltech256_node.h mode change 100755 => 100644 mindspore-lite/minddata/dataset/include/dataset/vision.h mode change 100755 => 100644 mindspore-lite/minddata/dataset/kernels/image/image_utils.h mode change 100755 => 100644 mindspore-lite/src/litert/cxx_api/model/model_group.cc mode change 100755 => 100644 mindspore-lite/src/litert/cxx_api/model/model_group_impl.cc mode change 100755 => 100644 mindspore-lite/tools/converter/adapter/dpico/common/graph_output_name_keeper.h mode change 100755 => 100644 mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_onnx_infer.cc mode change 100755 => 100644 mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_onnx_infer.h mode change 100755 => 100644 mindspore-lite/tools/converter/adapter/dpico/infer/dpico_maxunpool_infer.cc mode change 100755 => 100644 mindspore-lite/tools/converter/adapter/dpico/infer/dpico_maxunpool_infer.h mode change 100755 => 100644 mindspore-lite/tools/converter/adapter/dpico/mapper/maxunpool_mapper.cc mode change 100755 => 100644 mindspore-lite/tools/converter/adapter/dpico/mapper/maxunpool_mapper.h diff --git a/mindspore-lite/examples/cloud_infer/runtime_cpp/flags.h b/mindspore-lite/examples/cloud_infer/runtime_cpp/flags.h index 2f4ce7be..297caf6e 100644 --- a/mindspore-lite/examples/cloud_infer/runtime_cpp/flags.h +++ b/mindspore-lite/examples/cloud_infer/runtime_cpp/flags.h @@ -25,7 +25,7 @@ #include #include -namespace mindspore::example { +namespace mindspore::lite::example { // declare #define DEFINE_string(name, default_val, desc) \ static std::string FLAGS_##name = default_val; \ @@ -220,5 +220,5 @@ bool ParseCommandLineFlags(int argc, const char **argv) { } return true; } -} // namespace mindspore::example +} // namespace mindspore::lite::example #endif // MINDSPORE_LITE_EXAMPLE_RUNTIME_CPP_FLAGS_H_ diff --git a/mindspore-lite/examples/converter_acl_custom_pass/pass/pass_registry_tutorial.h b/mindspore-lite/examples/converter_acl_custom_pass/pass/pass_registry_tutorial.h index 3ca8361e..ac59f967 100644 --- a/mindspore-lite/examples/converter_acl_custom_pass/pass/pass_registry_tutorial.h +++ b/mindspore-lite/examples/converter_acl_custom_pass/pass/pass_registry_tutorial.h @@ -19,7 +19,7 @@ #include "include/registry/pass_base.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class PassTutorial : public registry::PassBase { public: @@ -33,5 +33,5 @@ class PassTutorial : public registry::PassBase { bool CreateCustomOp(const api::FuncGraphPtr func_graph, const api::AnfNodePtr &node); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_EXAMPLES_CONVERTER_EXTEND_PASS_PASS_REGISTRY_TUTORIAL_H diff --git a/mindspore-lite/examples/converter_extend/infer/custom_add_infer.cc b/mindspore-lite/examples/converter_extend/infer/custom_add_infer.cc index c530c182..1fbe486d 100644 --- a/mindspore-lite/examples/converter_extend/infer/custom_add_infer.cc +++ 
b/mindspore-lite/examples/converter_extend/infer/custom_add_infer.cc @@ -17,7 +17,7 @@ #include "infer/custom_common.h" #include "include/registry/register_kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { /** * CustomAddInfer is a child class to infer current node output's information, including format, data_type and shape. * if inputs' shape exist -1, don't worry, which shows that shape will be inferred when running. @@ -44,4 +44,4 @@ class CustomAddInfer : public kernel::KernelInterface { }; std::shared_ptr CustomAddInferCreator() { return std::make_shared(); } REGISTER_CUSTOM_KERNEL_INTERFACE(CustomOpTutorial, Custom_Add, CustomAddInferCreator) -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/examples/converter_extend/infer/custom_common.cc b/mindspore-lite/examples/converter_extend/infer/custom_common.cc index 32dee3d2..f34206f5 100644 --- a/mindspore-lite/examples/converter_extend/infer/custom_common.cc +++ b/mindspore-lite/examples/converter_extend/infer/custom_common.cc @@ -17,7 +17,7 @@ #include "infer/custom_common.h" #include -namespace mindspore { +namespace mindspore::lite { namespace common { Status CheckInputs(const std::vector &inputs) { for (auto &input : inputs) { @@ -29,4 +29,4 @@ Status CheckInputs(const std::vector &inputs) { return kSuccess; } } // namespace common -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/examples/converter_extend/infer/custom_common.h b/mindspore-lite/examples/converter_extend/infer/custom_common.h index 5f78622b..c3435a81 100644 --- a/mindspore-lite/examples/converter_extend/infer/custom_common.h +++ b/mindspore-lite/examples/converter_extend/infer/custom_common.h @@ -21,10 +21,10 @@ #include "include/api/types.h" #include "include/api/status.h" -namespace mindspore { +namespace mindspore::lite { namespace common { // verify that the inputs' shape is inferred successfully when inferring current node. 
Status CheckInputs(const std::vector &inputs); } // namespace common -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_EXAMPLES_CONVERTER_EXTEND_INFER_CUSTOM_COMMON_H diff --git a/mindspore-lite/examples/converter_extend/node_parser/add_parser_tutorial.cc b/mindspore-lite/examples/converter_extend/node_parser/add_parser_tutorial.cc index bae1816a..2673075f 100644 --- a/mindspore-lite/examples/converter_extend/node_parser/add_parser_tutorial.cc +++ b/mindspore-lite/examples/converter_extend/node_parser/add_parser_tutorial.cc @@ -19,7 +19,7 @@ #include "include/registry/node_parser_registry.h" #include "infer/cxx_api/add_fusion.h" -namespace mindspore { +namespace mindspore::lite { namespace converter { ops::BaseOperatorPtr AddParserTutorial::Parse(const std::unique_ptr &tflite_op, const std::unique_ptr &tflite_subgraph, @@ -34,4 +34,4 @@ ops::BaseOperatorPtr AddParserTutorial::Parse(const std::unique_ptr()); } // namespace converter -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/examples/converter_extend/node_parser/add_parser_tutorial.h b/mindspore-lite/examples/converter_extend/node_parser/add_parser_tutorial.h index 28306d53..e26b3bf1 100644 --- a/mindspore-lite/examples/converter_extend/node_parser/add_parser_tutorial.h +++ b/mindspore-lite/examples/converter_extend/node_parser/add_parser_tutorial.h @@ -20,7 +20,7 @@ #include #include "include/registry/node_parser.h" -namespace mindspore { +namespace mindspore::lite { namespace converter { class AddParserTutorial : public NodeParser { public: @@ -31,6 +31,6 @@ class AddParserTutorial : public NodeParser { const std::unique_ptr &tflite_model) override; }; } // namespace converter -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_EXAMPLES_CONVERTER_EXTEND_NODE_PARSER_ADD_PARSER_TUTORIAL_H diff --git a/mindspore-lite/examples/converter_extend/pass/pass_registry_tutorial.h b/mindspore-lite/examples/converter_extend/pass/pass_registry_tutorial.h index 1272fa62..43889d76 100644 --- a/mindspore-lite/examples/converter_extend/pass/pass_registry_tutorial.h +++ b/mindspore-lite/examples/converter_extend/pass/pass_registry_tutorial.h @@ -19,7 +19,7 @@ #include "include/registry/pass_base.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class PassTutorial : public registry::PassBase { public: @@ -33,5 +33,5 @@ class PassTutorial : public registry::PassBase { api::AnfNodePtr CreateCustomOp(const api::FuncGraphPtr func_graph, const api::CNodePtr &cnode); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_EXAMPLES_CONVERTER_EXTEND_PASS_PASS_REGISTRY_TUTORIAL_H diff --git a/mindspore-lite/examples/runtime_extend/src/custom_add_infer.cc b/mindspore-lite/examples/runtime_extend/src/custom_add_infer.cc index d61489a3..39625db8 100644 --- a/mindspore-lite/examples/runtime_extend/src/custom_add_infer.cc +++ b/mindspore-lite/examples/runtime_extend/src/custom_add_infer.cc @@ -18,7 +18,7 @@ #include "include/api/status.h" #include "include/registry/register_kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { /** * CustomAddInfer is a child class to infer current node output's information, including format, data_type and shape. * if inputs' shape exist -1, don't worry, which shows that shape will be inferred when running. 
@@ -45,4 +45,4 @@ class CustomAddInfer : public kernel::KernelInterface { }; std::shared_ptr CustomAddInferCreator() { return std::make_shared(); } REGISTER_CUSTOM_KERNEL_INTERFACE(Tutorial, Custom_Add, CustomAddInferCreator) -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/examples/runtime_extend/src/custom_add_kernel.cc b/mindspore-lite/examples/runtime_extend/src/custom_add_kernel.cc index 6b8bf216..7866cfdd 100644 --- a/mindspore-lite/examples/runtime_extend/src/custom_add_kernel.cc +++ b/mindspore-lite/examples/runtime_extend/src/custom_add_kernel.cc @@ -22,7 +22,7 @@ #include "include/registry/register_kernel_interface.h" #include "include/registry/register_kernel.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { namespace { const auto kFloat32 = DataType::kNumberTypeFloat32; @@ -112,4 +112,4 @@ std::shared_ptr CustomAddCreator(const std::vector &inputs, co } REGISTER_CUSTOM_KERNEL(CPU, Tutorial, kFloat32, Custom_Add, CustomAddCreator) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/examples/runtime_extend/src/custom_common.cc b/mindspore-lite/examples/runtime_extend/src/custom_common.cc index 4a4a0442..c26bce2d 100644 --- a/mindspore-lite/examples/runtime_extend/src/custom_common.cc +++ b/mindspore-lite/examples/runtime_extend/src/custom_common.cc @@ -17,7 +17,7 @@ #include "src/custom_common.h" #include -namespace mindspore { +namespace mindspore::lite { namespace common { Status CheckInputs(const std::vector &inputs) { for (auto &input : inputs) { @@ -39,4 +39,4 @@ Status CheckOutputs(const std::vector &outputs) { return kSuccess; } } // namespace common -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/examples/runtime_extend/src/custom_common.h b/mindspore-lite/examples/runtime_extend/src/custom_common.h index 77c408cb..39d009ff 100644 --- a/mindspore-lite/examples/runtime_extend/src/custom_common.h +++ b/mindspore-lite/examples/runtime_extend/src/custom_common.h @@ -21,7 +21,7 @@ #include "include/api/types.h" #include "include/api/status.h" -namespace mindspore { +namespace mindspore::lite { namespace common { // verify that the inputs' shape is inferred successfully when inferring current node. Status CheckInputs(const std::vector &inputs); @@ -29,5 +29,5 @@ Status CheckInputs(const std::vector &inputs); // versify that the outputs' shape is inferred successfully when running current node. Status CheckOutputs(const std::vector &inputs); } // namespace common -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_EXAMPLES_RUNTIME_REGISTRY_SRC_CUSTOM_COMMON_H diff --git a/mindspore-lite/examples/runtime_gpu_extend/src/custom_add_infer.cc b/mindspore-lite/examples/runtime_gpu_extend/src/custom_add_infer.cc index 43a435f1..7f6439d8 100644 --- a/mindspore-lite/examples/runtime_gpu_extend/src/custom_add_infer.cc +++ b/mindspore-lite/examples/runtime_gpu_extend/src/custom_add_infer.cc @@ -18,7 +18,7 @@ #include "include/errorcode.h" #include "include/registry/register_kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { /** * CustomAddInfer is a child class to infer current node output's information, including format, data_type and shape. * if inputs' shape exist -1, don't worry, which shows that shape will be inferred when running. 
@@ -47,4 +47,4 @@ class CustomAddInfer : public kernel::KernelInterface { }; std::shared_ptr CustomAddInferCreator() { return std::make_shared(); } REGISTER_CUSTOM_KERNEL_INTERFACE(Tutorial, Custom_Add, CustomAddInferCreator) -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/examples/runtime_gpu_extend/src/custom_add_kernel_gpu.cc b/mindspore-lite/examples/runtime_gpu_extend/src/custom_add_kernel_gpu.cc index 904a11f5..9172a772 100644 --- a/mindspore-lite/examples/runtime_gpu_extend/src/custom_add_kernel_gpu.cc +++ b/mindspore-lite/examples/runtime_gpu_extend/src/custom_add_kernel_gpu.cc @@ -30,7 +30,7 @@ #define UP_ROUND(x, y) (((x) + (y) - (1)) / (y) * (y)) -namespace mindspore { +namespace mindspore::lite { namespace custom_gpu_demo { class CustomAddKernelGpu : public kernel::Kernel { public: @@ -260,4 +260,4 @@ using schema::PrimitiveType_AddFusion; // Register the add operator to replace the internal add operator of MindSpore Lite REGISTER_KERNEL(GPU, Tutorial, kFloat32, PrimitiveType_AddFusion, custom_gpu_demo::CustomAddCreator) REGISTER_KERNEL(GPU, Tutorial, kFloat16, PrimitiveType_AddFusion, custom_gpu_demo::CustomAddFP16Creator) -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/examples/runtime_gpu_extend/src/custom_common.cc b/mindspore-lite/examples/runtime_gpu_extend/src/custom_common.cc index 946efeaf..f8013505 100644 --- a/mindspore-lite/examples/runtime_gpu_extend/src/custom_common.cc +++ b/mindspore-lite/examples/runtime_gpu_extend/src/custom_common.cc @@ -16,7 +16,7 @@ #include "src/custom_common.h" -namespace mindspore { +namespace mindspore::lite { namespace custom_common { int CheckInputs(const std::vector &inputs) { for (auto &input : inputs) { @@ -72,4 +72,4 @@ void PackNHWCToNHWC4(void *src, void *dst, bool src_is_fp16, bool dst_is_fp16, c } } } // namespace custom_common -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/examples/runtime_gpu_extend/src/custom_common.h b/mindspore-lite/examples/runtime_gpu_extend/src/custom_common.h index 00c1d2fa..51d91c5e 100644 --- a/mindspore-lite/examples/runtime_gpu_extend/src/custom_common.h +++ b/mindspore-lite/examples/runtime_gpu_extend/src/custom_common.h @@ -27,7 +27,7 @@ #define UP_DIV(x, y) (((x) + (y) - (1)) / (y)) #define C4NUM 4 -namespace mindspore { +namespace mindspore::lite { namespace custom_common { template void Broadcast2GpuShape(DstT *dst, const SrcT *src, int src_num) { @@ -124,5 +124,5 @@ int CheckOutputs(const std::vector &inputs); void PackNHWCToNHWC4(void *src, void *dst, bool src_is_fp16, bool dst_is_fp16, const GpuTensorInfo &tensor, mindspore::DataType data_type = mindspore::DataType::kNumberTypeFloat32); } // namespace custom_common -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_EXAMPLES_RUNTIME_GPU_EXTEND_SRC_CUSTOM_COMMON_H diff --git a/mindspore-lite/include/api/multi_model_runner.h b/mindspore-lite/include/api/multi_model_runner.h index d95ae976..02937769 100644 --- a/mindspore-lite/include/api/multi_model_runner.h +++ b/mindspore-lite/include/api/multi_model_runner.h @@ -25,7 +25,7 @@ #include "include/api/context.h" #include "include/api/dual_abi_helper.h" -namespace mindspore { +namespace mindspore::lite { using ConfigInfos = std::map>; class ModelImpl; class MS_API ModelExecutor { @@ -141,6 +141,6 @@ Status MultiModelRunner::Build(const std::string &model_path, const ModelType &m return Build(StringToChar(model_path), model_type, model_context); } -} 
// namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_MULTI_MODEL_RUNNER_H_ diff --git a/mindspore-lite/include/converter.h b/mindspore-lite/include/converter.h index 39f80fa1..12c55061 100644 --- a/mindspore-lite/include/converter.h +++ b/mindspore-lite/include/converter.h @@ -25,7 +25,7 @@ #include "include/registry/converter_context.h" #include "include/api/dual_abi_helper.h" -namespace mindspore { +namespace mindspore::lite { struct ConverterPara; /// \brief Converter provides C++ API for user to integrate model conversion into user application. /// @@ -186,5 +186,5 @@ Status Converter::Convert(converter::FmkType fmk_type, const std::string &model_ const std::string &weight_file) { return Convert(fmk_type, StringToChar(model_file), StringToChar(output_file), StringToChar(weight_file)); } -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_CONVERTER_H_ diff --git a/mindspore-lite/include/kernel_interface.h b/mindspore-lite/include/kernel_interface.h index 127e2a2a..ca2d139c 100644 --- a/mindspore-lite/include/kernel_interface.h +++ b/mindspore-lite/include/kernel_interface.h @@ -22,7 +22,7 @@ #include "include/api/status.h" #include "schema/model_generated.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class Kernel; /// \brief KernelInterface defined customized op's interface, such as infershape, and so on. @@ -57,6 +57,6 @@ class MS_API KernelInterface { } }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_KERNEL_INTERFACE_H_ diff --git a/mindspore-lite/include/registry/converter_context.h b/mindspore-lite/include/registry/converter_context.h index 3ed26741..05c7b1ce 100644 --- a/mindspore-lite/include/registry/converter_context.h +++ b/mindspore-lite/include/registry/converter_context.h @@ -23,7 +23,7 @@ #include "include/api/types.h" #include "include/api/dual_abi_helper.h" -namespace mindspore { +namespace mindspore::lite { namespace converter { constexpr auto KConverterParam = "converter_parameters"; constexpr auto KCommonQuantParam = "common_quant_param"; @@ -90,6 +90,6 @@ class MS_API ConverterContext { static std::map, std::vector> GetConfigInfo(const std::vector &§ion); }; } // namespace converter -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_REGISTRY_CONVERTER_CONTEXT_H_ diff --git a/mindspore-lite/include/registry/model_parser.h b/mindspore-lite/include/registry/model_parser.h index df56ecfe..b584cbb6 100644 --- a/mindspore-lite/include/registry/model_parser.h +++ b/mindspore-lite/include/registry/model_parser.h @@ -20,7 +20,7 @@ #include "mindapi/ir/func_graph.h" #include "include/registry/converter_context.h" -namespace mindspore { +namespace mindspore::lite { namespace converter { /// \brief ModelParser defined a base class to parse model. 
class MS_API ModelParser { @@ -42,6 +42,6 @@ class MS_API ModelParser { api::FuncGraphPtr res_graph_ = nullptr; }; } // namespace converter -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_REGISTRY_MODEL_PARSER_H_ diff --git a/mindspore-lite/include/registry/model_parser_registry.h b/mindspore-lite/include/registry/model_parser_registry.h index 7c2529a1..f3f0e7ab 100644 --- a/mindspore-lite/include/registry/model_parser_registry.h +++ b/mindspore-lite/include/registry/model_parser_registry.h @@ -21,7 +21,7 @@ #include "include/registry/converter_context.h" using mindspore::converter::FmkType; -namespace mindspore { +namespace mindspore::lite { namespace converter { class ModelParser; } // namespace converter @@ -56,6 +56,6 @@ class MS_API ModelParserRegistry { #define REG_MODEL_PARSER(fmk, parserCreator) \ static mindspore::registry::ModelParserRegistry g_##type##fmk##ModelParserReg(fmk, parserCreator); } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_REGISTRY_MODEL_PARSER_REGISTRY_H_ diff --git a/mindspore-lite/include/registry/node_parser.h b/mindspore-lite/include/registry/node_parser.h index 2d987eff..f888f252 100644 --- a/mindspore-lite/include/registry/node_parser.h +++ b/mindspore-lite/include/registry/node_parser.h @@ -43,7 +43,7 @@ struct SubGraphT; struct ModelT; } // namespace tflite -namespace mindspore { +namespace mindspore::lite { namespace ops { /// \brief PrimitiveC defined a base class for storing properties using BaseOperatorPtr = api::SharedPtr; @@ -109,6 +109,6 @@ class MS_API NodeParser { /// \brief NodeParserPtr defined a shared_ptr type. using NodeParserPtr = std::shared_ptr; } // namespace converter -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_REGISTRY_NODE_PARSER_H_ diff --git a/mindspore-lite/include/registry/node_parser_registry.h b/mindspore-lite/include/registry/node_parser_registry.h index 2ae17d77..d90d7d16 100644 --- a/mindspore-lite/include/registry/node_parser_registry.h +++ b/mindspore-lite/include/registry/node_parser_registry.h @@ -22,7 +22,7 @@ #include "include/registry/node_parser.h" #include "include/api/dual_abi_helper.h" -namespace mindspore { +namespace mindspore::lite { namespace registry { /// \brief NodeParserRegistry defined registration of NodeParser. class MS_API NodeParserRegistry { @@ -68,6 +68,6 @@ converter::NodeParserPtr NodeParserRegistry::GetNodeParser(converter::FmkType fm #define REG_NODE_PARSER(fmk_type, node_type, node_parser) \ static mindspore::registry::NodeParserRegistry g_##fmk_type##node_type##ParserReg(fmk_type, #node_type, node_parser); } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_REGISTRY_NODE_PARSER_REGISTRY_H_ diff --git a/mindspore-lite/include/registry/pass_base.h b/mindspore-lite/include/registry/pass_base.h index ec4e88be..83f10057 100644 --- a/mindspore-lite/include/registry/pass_base.h +++ b/mindspore-lite/include/registry/pass_base.h @@ -22,7 +22,7 @@ #include "include/api/types.h" #include "mindapi/ir/func_graph.h" -namespace mindspore { +namespace mindspore::lite { namespace registry { /// \brief PassBase defined a base class, which provides an interface for user to operate FuncGraph. class MS_API PassBase { @@ -49,5 +49,5 @@ class MS_API PassBase { /// \brief PassBasePtr defined a shared_ptr type. 
using PassBasePtr = std::shared_ptr; } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_REGISTRY_PASS_BASE_H_ diff --git a/mindspore-lite/include/registry/pass_registry.h b/mindspore-lite/include/registry/pass_registry.h index 38049513..a9adf2a2 100644 --- a/mindspore-lite/include/registry/pass_registry.h +++ b/mindspore-lite/include/registry/pass_registry.h @@ -23,7 +23,7 @@ #include "include/api/types.h" #include "include/api/dual_abi_helper.h" -namespace mindspore { +namespace mindspore::lite { namespace registry { class PassBase; using PassBasePtr = std::shared_ptr; @@ -95,6 +95,6 @@ PassBasePtr PassRegistry::GetPassFromStoreRoom(const std::string &pass_name) { /// \param[in] names Define the names of the passes. #define REG_SCHEDULED_PASS(position, names) static mindspore::registry::PassRegistry g_##position(position, names); } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_REGISTRY_PASS_REGISTRY_H_ diff --git a/mindspore-lite/include/registry/register_kernel.h b/mindspore-lite/include/registry/register_kernel.h index 764675b8..17cd381d 100644 --- a/mindspore-lite/include/registry/register_kernel.h +++ b/mindspore-lite/include/registry/register_kernel.h @@ -28,7 +28,7 @@ #include "include/api/data_type.h" #include "include/api/status.h" -namespace mindspore { +namespace mindspore::lite { namespace registry { /// \brief KernelDesc defined kernel's basic attribute. struct KernelDesc { @@ -178,6 +178,6 @@ CreateKernel RegisterKernel::GetCreator(const schema::Primitive *primitive, Kern #op_type, creator); \ } // namespace } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_REGISTRY_REGISTER_KERNEL_H_ diff --git a/mindspore-lite/include/registry/register_kernel_interface.h b/mindspore-lite/include/registry/register_kernel_interface.h index 93869ec4..e71cf2cf 100644 --- a/mindspore-lite/include/registry/register_kernel_interface.h +++ b/mindspore-lite/include/registry/register_kernel_interface.h @@ -24,7 +24,7 @@ #include "include/kernel_interface.h" #include "schema/model_generated.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class Kernel; } @@ -134,6 +134,6 @@ std::shared_ptr RegisterKernelInterface::GetKernelInter creator); \ } // namespace } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_REGISTRY_REGISTER_KERNEL_INTERFACE_H_ diff --git a/mindspore-lite/include/train/metrics.h b/mindspore-lite/include/train/metrics.h index 47e9e62e..3166f3f8 100644 --- a/mindspore-lite/include/train/metrics.h +++ b/mindspore-lite/include/train/metrics.h @@ -21,7 +21,7 @@ #include #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { namespace session { class Metrics { @@ -33,5 +33,5 @@ class Metrics { }; } // namespace session -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_TRAIN_METRICS_H_ diff --git a/mindspore-lite/include/train/train_loop.h b/mindspore-lite/include/train/train_loop.h index 7f08107b..a1874199 100644 --- a/mindspore-lite/include/train/train_loop.h +++ b/mindspore-lite/include/train/train_loop.h @@ -23,7 +23,7 @@ #include "include/train/metrics.h" #include "src/litert/lite_session.h" -namespace mindspore { +namespace mindspore::lite { class MSTensor; namespace dataset { @@ -100,5 +100,5 @@ class TrainLoop { LoadDataFunc load_func, int 
max_steps) = 0; }; } // namespace session -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INCLUDE_TRAIN_TRAIN_LOOP_H_ diff --git a/mindspore-lite/minddata/dataset/api/data_helper.cc b/mindspore-lite/minddata/dataset/api/data_helper.cc index fbe7bd12..75f00fb7 100644 --- a/mindspore-lite/minddata/dataset/api/data_helper.cc +++ b/mindspore-lite/minddata/dataset/api/data_helper.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/util/json_helper.h" #include "include/api/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Create a numbered json file from image folder Status DataHelper::CreateAlbumIF(const std::vector &in_dir, const std::vector &out_dir) { @@ -188,4 +188,4 @@ size_t DataHelper::DumpData(const unsigned char *tensor_addr, const size_t &tens return jh.DumpData(tensor_addr, tensor_size, addr, buffer_size); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/api/datasets.cc b/mindspore-lite/minddata/dataset/api/datasets.cc index 8bd4fc5d..e9e9a9d1 100644 --- a/mindspore-lite/minddata/dataset/api/datasets.cc +++ b/mindspore-lite/minddata/dataset/api/datasets.cc @@ -138,7 +138,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yes_no_node.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // convert MSTensorVec to DE TensorRow, return empty if fails TensorRow VecToRow(const MSTensorVec &v) { @@ -2183,4 +2183,4 @@ YesNoDataset::YesNoDataset(const std::vector &dataset_dir, const std::refe } #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/api/execute.cc b/mindspore-lite/minddata/dataset/api/execute.cc index c8cd9f76..63ee0c6c 100644 --- a/mindspore-lite/minddata/dataset/api/execute.cc +++ b/mindspore-lite/minddata/dataset/api/execute.cc @@ -36,7 +36,7 @@ namespace platform = mindspore::lite; -namespace mindspore { +namespace mindspore::lite { namespace dataset { using json = nlohmann::json; @@ -744,4 +744,4 @@ void ExecuteRun_C(const std::vector> &data_gra } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/api/iterator.cc b/mindspore-lite/minddata/dataset/api/iterator.cc index ed099ea2..645f09e5 100644 --- a/mindspore-lite/minddata/dataset/api/iterator.cc +++ b/mindspore-lite/minddata/dataset/api/iterator.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/runtime_context.h" #include "mindspore-lite/minddata/dataset/include/dataset/datasets.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Iterator::Iterator() : consumer_(nullptr) {} @@ -180,4 +180,4 @@ Iterator::_Iterator &Iterator::_Iterator::operator++() { return *this; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/api/python/python_mp.h b/mindspore-lite/minddata/dataset/api/python/python_mp.h index fe60695f..6e1d5263 100644 --- a/mindspore-lite/minddata/dataset/api/python/python_mp.h +++ b/mindspore-lite/minddata/dataset/api/python/python_mp.h @@ -30,7 +30,7 @@ namespace py = pybind11; #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class PythonMultiprocessingRuntime { public: @@ -92,5 +92,5 @@ class PyPythonMultiprocessingRuntime : public 
PythonMultiprocessingRuntime { }; #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_API_PYTHON_MP_H_ diff --git a/mindspore-lite/minddata/dataset/api/samplers.cc b/mindspore-lite/minddata/dataset/api/samplers.cc index 2a0f5676..480dfe98 100644 --- a/mindspore-lite/minddata/dataset/api/samplers.cc +++ b/mindspore-lite/minddata/dataset/api/samplers.cc @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_sampler_ir.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/weighted_random_sampler_ir.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status Sampler::BuildChildren(std::shared_ptr *const sampler) const { RETURN_UNEXPECTED_IF_NULL(sampler); @@ -138,4 +138,4 @@ std::shared_ptr WeightedRandomSampler::Parse() const { return output; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/api/transforms.cc b/mindspore-lite/minddata/dataset/api/transforms.cc index d3bee47a..616ea1f1 100644 --- a/mindspore-lite/minddata/dataset/api/transforms.cc +++ b/mindspore-lite/minddata/dataset/api/transforms.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/data/transforms_ir.h" #include "ir/dtype/type_id.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Transform operations for data. namespace transforms { @@ -292,4 +292,4 @@ std::shared_ptr Unique::Parse() { #endif } // namespace transforms } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/api/vision.cc b/mindspore-lite/minddata/dataset/api/vision.cc index 7bd5df1c..bb74089a 100644 --- a/mindspore-lite/minddata/dataset/api/vision.cc +++ b/mindspore-lite/minddata/dataset/api/vision.cc @@ -111,7 +111,7 @@ #include "mindspore-lite/minddata/dataset/core/type_id.h" #include "ir/dtype/type_id.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Transform operations for computer vision. 
namespace vision { @@ -1550,4 +1550,4 @@ Status WritePng(const std::string &filename, const mindspore::MSTensor &image, i #endif // not ENABLE_ANDROID } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/callback/callback_manager.cc b/mindspore-lite/minddata/dataset/callback/callback_manager.cc index 36af5c11..adb96734 100644 --- a/mindspore-lite/minddata/dataset/callback/callback_manager.cc +++ b/mindspore-lite/minddata/dataset/callback/callback_manager.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { void CallbackManager::AddCallbacks(std::vector> callbacks) { @@ -143,4 +143,4 @@ Status CallbackManager::StepEnd(const CallbackParam &cb_param) { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/callback/callback_manager.h b/mindspore-lite/minddata/dataset/callback/callback_manager.h index d37ce532..933e3436 100644 --- a/mindspore-lite/minddata/dataset/callback/callback_manager.h +++ b/mindspore-lite/minddata/dataset/callback/callback_manager.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/callback/ds_callback.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // forward declare to avoid cyclic include of dataset_op.h @@ -85,6 +85,6 @@ class CallbackManager { std::vector step_end_indices_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CALLBACK_MANAGER_H diff --git a/mindspore-lite/minddata/dataset/callback/callback_param.h b/mindspore-lite/minddata/dataset/callback/callback_param.h index 70f46569..fcf4980c 100644 --- a/mindspore-lite/minddata/dataset/callback/callback_param.h +++ b/mindspore-lite/minddata/dataset/callback/callback_param.h @@ -19,7 +19,7 @@ #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// Callback Param is the object a DatasetOp uses to pass run-time information to user defined function. 
@@ -38,5 +38,5 @@ class CallbackParam { const int64_t cur_step_num_; // step number since the first row }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CALLBACK_PARAM_H diff --git a/mindspore-lite/minddata/dataset/callback/ds_callback.h b/mindspore-lite/minddata/dataset/callback/ds_callback.h index d2beca10..50c16d08 100644 --- a/mindspore-lite/minddata/dataset/callback/ds_callback.h +++ b/mindspore-lite/minddata/dataset/callback/ds_callback.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/callback/callback_param.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DSCallback { @@ -98,6 +98,6 @@ class DSCallback { int32_t step_size_; // step begin/end will be called every step_size_ }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_DS_CALLBACK_H diff --git a/mindspore-lite/minddata/dataset/callback/py_ds_callback.cc b/mindspore-lite/minddata/dataset/callback/py_ds_callback.cc index cf3fe821..46af478f 100644 --- a/mindspore-lite/minddata/dataset/callback/py_ds_callback.cc +++ b/mindspore-lite/minddata/dataset/callback/py_ds_callback.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/callback/callback_manager.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status PyDSCallback::DSBegin(const CallbackParam &cb_param) { @@ -87,4 +87,4 @@ void PyDSCallback::SetStepEnd(const py::function &f) { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/callback/py_ds_callback.h b/mindspore-lite/minddata/dataset/callback/py_ds_callback.h index c65a23f5..a78e8e8b 100644 --- a/mindspore-lite/minddata/dataset/callback/py_ds_callback.h +++ b/mindspore-lite/minddata/dataset/callback/py_ds_callback.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "pybind11/pybind11.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace py = pybind11; @@ -127,5 +127,5 @@ class PyDSCallback : public DSCallback { bool step_end_needed_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_PY_DS_CALLBACK_H diff --git a/mindspore-lite/minddata/dataset/core/ascend_resource.cc b/mindspore-lite/minddata/dataset/core/ascend_resource.cc index 37bc1973..5b03c106 100644 --- a/mindspore-lite/minddata/dataset/core/ascend_resource.cc +++ b/mindspore-lite/minddata/dataset/core/ascend_resource.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status AscendResource::InitResource(uint32_t device_id) { ResourceInfo resource; @@ -105,4 +105,4 @@ void *AscendResource::GetContext() { return AclAdapter::GetInstance().GetContext void *AscendResource::GetStream() { return AclAdapter::GetInstance().GetStreamFromAclProcess(processor_.get()); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/core/ascend_resource.h b/mindspore-lite/minddata/dataset/core/ascend_resource.h index e705ad5d..bdf7870d 100644 --- 
a/mindspore-lite/minddata/dataset/core/ascend_resource.h +++ b/mindspore-lite/minddata/dataset/core/ascend_resource.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/core/device_tensor.h" #include "mindspore-lite/minddata/dataset/core/tensor.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class AscendResource : public DeviceResource { @@ -52,6 +52,6 @@ class AscendResource : public DeviceResource { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_ASCEND_RESOURCE_H_ diff --git a/mindspore-lite/minddata/dataset/core/client.cc b/mindspore-lite/minddata/dataset/core/client.cc index 2485ae03..9cb4ddf2 100644 --- a/mindspore-lite/minddata/dataset/core/client.cc +++ b/mindspore-lite/minddata/dataset/core/client.cc @@ -16,7 +16,7 @@ #include "mindspore-lite/minddata/dataset/core/client.h" #include "mindspore-lite/minddata/dataset/util/services.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // This is a one-time global initializer which includes the call to instantiate singletons. // It is external api call and not a member of the GlobalContext directly. @@ -25,4 +25,4 @@ Status GlobalInit() { return (Services::CreateInstance()); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/core/client.h b/mindspore-lite/minddata/dataset/core/client.h index 1ba0ea70..3e0be48f 100644 --- a/mindspore-lite/minddata/dataset/core/client.h +++ b/mindspore-lite/minddata/dataset/core/client.h @@ -54,12 +54,12 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // This is a one-time global initializer that needs to be called at the // start of any minddata applications. 
extern Status GlobalInit(); } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_CLIENT_H_ diff --git a/mindspore-lite/minddata/dataset/core/config_manager.cc b/mindspore-lite/minddata/dataset/core/config_manager.cc index 831405f2..573d30b3 100644 --- a/mindspore-lite/minddata/dataset/core/config_manager.cc +++ b/mindspore-lite/minddata/dataset/core/config_manager.cc @@ -26,7 +26,7 @@ #include "util/path.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { ConfigManager::ConfigManager() : num_parallel_workers_(kCfgParallelWorkers), @@ -224,4 +224,4 @@ Status ConfigManager::set_enable_autotune(bool enable, bool save_autoconfig, con } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/core/config_manager.h b/mindspore-lite/minddata/dataset/core/config_manager.h index 7a177686..2d400ee4 100644 --- a/mindspore-lite/minddata/dataset/core/config_manager.h +++ b/mindspore-lite/minddata/dataset/core/config_manager.h @@ -34,7 +34,7 @@ // } // -namespace mindspore { +namespace mindspore::lite { namespace dataset { const char kEmptyString[] = ""; const char kJsonExtension[] = ".json"; @@ -380,5 +380,5 @@ class ConfigManager { std::string start_method_; // fork or spawn }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_CONFIG_MANAGER_H_ diff --git a/mindspore-lite/minddata/dataset/core/cv_tensor.cc b/mindspore-lite/minddata/dataset/core/cv_tensor.cc index 7bf829ce..4eb619e8 100644 --- a/mindspore-lite/minddata/dataset/core/cv_tensor.cc +++ b/mindspore-lite/minddata/dataset/core/cv_tensor.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/include/dataset/constants.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { CVTensor::CVTensor(std::shared_ptr tensor) : Tensor(std::move(*tensor)) { (void)this->MatInit(GetMutableBuffer(), shape_, type_, &mat_); @@ -159,4 +159,4 @@ Status CVTensor::MatAtIndex(const std::vector &index, cv::Mat *mat) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/core/cv_tensor.h b/mindspore-lite/minddata/dataset/core/cv_tensor.h index 1a84f4d6..1c65c479 100644 --- a/mindspore-lite/minddata/dataset/core/cv_tensor.h +++ b/mindspore-lite/minddata/dataset/core/cv_tensor.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/core/data_type.h" #include "mindspore-lite/minddata/dataset/core/tensor.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { using CVTensorPtr = std::shared_ptr; class CVTensor : public Tensor { @@ -108,5 +108,5 @@ class CVTensor : public Tensor { Status MatInit(uchar *data, const TensorShape &shape, const DataType &type, cv::Mat *mat); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_CV_TENSOR_H_ diff --git a/mindspore-lite/minddata/dataset/core/data_type.cc b/mindspore-lite/minddata/dataset/core/data_type.cc index 48bf54e2..47afccf1 100644 --- a/mindspore-lite/minddata/dataset/core/data_type.cc +++ b/mindspore-lite/minddata/dataset/core/data_type.cc @@ -19,7 +19,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { 
namespace dataset { uint8_t DataType::SizeInBytes() const { if (type_ < DataType::NUM_OF_TYPES) { @@ -189,4 +189,4 @@ std::string DataType::GetPybindFormat() const { } #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/core/data_type.h b/mindspore-lite/minddata/dataset/core/data_type.h index 06621384..2dcb2897 100644 --- a/mindspore-lite/minddata/dataset/core/data_type.h +++ b/mindspore-lite/minddata/dataset/core/data_type.h @@ -34,7 +34,7 @@ namespace py = pybind11; #endif #include "mindspore-lite/minddata/dataset/include/dataset/constants.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Class that represents basic data types in DataEngine. class DataType { @@ -394,5 +394,5 @@ inline bool DataType::IsLooselyCompatible() const { return type_ == DataType::DE_STRING || type_ == DataType::DE_BYTES; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DATA_TYPE_H_ diff --git a/mindspore-lite/minddata/dataset/core/de_tensor.cc b/mindspore-lite/minddata/dataset/core/de_tensor.cc index cd22f2a3..96ccbacd 100644 --- a/mindspore-lite/minddata/dataset/core/de_tensor.cc +++ b/mindspore-lite/minddata/dataset/core/de_tensor.cc @@ -25,7 +25,7 @@ #define EXCEPTION_IF_NULL(ptr) MS_ASSERT((ptr) != nullptr) #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { DETensor::DETensor(std::shared_ptr tensor_impl) @@ -123,4 +123,4 @@ std::shared_ptr DETensor::Clone() const { return std::make_shared(tensor_impl_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/core/de_tensor.h b/mindspore-lite/minddata/dataset/core/de_tensor.h index b90c30c3..95aaca14 100644 --- a/mindspore-lite/minddata/dataset/core/de_tensor.h +++ b/mindspore-lite/minddata/dataset/core/de_tensor.h @@ -24,7 +24,7 @@ #include "include/api/visible.h" #include "ir/api_tensor_impl.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class Tensor; class DeviceTensor; @@ -67,5 +67,5 @@ class DETensor : public mindspore::MSTensor::Impl { std::vector shape_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DETENSOR_H_ diff --git a/mindspore-lite/minddata/dataset/core/device_resource.cc b/mindspore-lite/minddata/dataset/core/device_resource.cc index e77f847a..ab2f54a3 100644 --- a/mindspore-lite/minddata/dataset/core/device_resource.cc +++ b/mindspore-lite/minddata/dataset/core/device_resource.cc @@ -16,7 +16,7 @@ #include "mindspore-lite/minddata/dataset/core/device_resource.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status DeviceResource::InitResource(uint32_t) { @@ -67,4 +67,4 @@ void *DeviceResource::GetStream() { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/core/device_resource.h b/mindspore-lite/minddata/dataset/core/device_resource.h index 013bdc50..218d6a80 100644 --- a/mindspore-lite/minddata/dataset/core/device_resource.h +++ b/mindspore-lite/minddata/dataset/core/device_resource.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/core/device_tensor.h" #include "mindspore-lite/minddata/dataset/core/tensor.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DeviceResource { @@ -51,5 +51,5 @@ class 
DeviceResource { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_DEVICE_RESOURCE_H diff --git a/mindspore-lite/minddata/dataset/core/device_tensor.cc b/mindspore-lite/minddata/dataset/core/device_tensor.cc index 11bf0c66..98567e54 100644 --- a/mindspore-lite/minddata/dataset/core/device_tensor.cc +++ b/mindspore-lite/minddata/dataset/core/device_tensor.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/acl_adapter.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const int kYuvDefaultChannels = 4; @@ -176,4 +176,4 @@ Status DeviceTensor::DataPop_(std::shared_ptr *host_tensor) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/core/device_tensor.h b/mindspore-lite/minddata/dataset/core/device_tensor.h index cbe9e01c..612fcc0c 100644 --- a/mindspore-lite/minddata/dataset/core/device_tensor.h +++ b/mindspore-lite/minddata/dataset/core/device_tensor.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class Tensor; class DATASET_API DeviceTensor : public Tensor { @@ -78,5 +78,5 @@ class DATASET_API DeviceTensor : public Tensor { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_DEVICE_TENSOR_H_ diff --git a/mindspore-lite/minddata/dataset/core/global_context.cc b/mindspore-lite/minddata/dataset/core/global_context.cc index 1e8abb6d..24e1a8c1 100644 --- a/mindspore-lite/minddata/dataset/core/global_context.cc +++ b/mindspore-lite/minddata/dataset/core/global_context.cc @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/util/allocator.h" #include "mindspore-lite/minddata/dataset/util/system_pool.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Global static pointer for the singleton GlobalContext std::unique_ptr GlobalContext::global_context_ = nullptr; @@ -73,4 +73,4 @@ void GlobalContext::Print(std::ostream &out) const { out << "GlobalContext contains the following default config: " << *config_manager_ << "\n"; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/core/global_context.h b/mindspore-lite/minddata/dataset/core/global_context.h index 0fb9f931..592d7fab 100644 --- a/mindspore-lite/minddata/dataset/core/global_context.h +++ b/mindspore-lite/minddata/dataset/core/global_context.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/constants.h" #include "mindspore-lite/minddata/dataset/util/allocator.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // forward declare class MemoryPool; @@ -116,6 +116,6 @@ class GlobalContext { std::shared_ptr profiler_manager_; // ProfilerManager instance for all trees }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_GLOBAL_CONTEXT_H_ diff --git a/mindspore-lite/minddata/dataset/core/tensor.cc b/mindspore-lite/minddata/dataset/core/tensor.cc index 919edd38..6486d9e1 100644 --- a/mindspore-lite/minddata/dataset/core/tensor.cc +++ b/mindspore-lite/minddata/dataset/core/tensor.cc @@ -41,7 +41,7 @@ namespace py = 
pybind11; #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Helper macros for printing tensor elements #define CASE_PRINT(de_type, native_type) \ @@ -1315,4 +1315,4 @@ Status Tensor::CreateFromMSTensor(const MSTensor &in, TensorPtr *out) { (const uchar *)(in.Data().get()), in.DataSize(), out); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/core/tensor.h b/mindspore-lite/minddata/dataset/core/tensor.h index b4059de7..f849efa0 100644 --- a/mindspore-lite/minddata/dataset/core/tensor.h +++ b/mindspore-lite/minddata/dataset/core/tensor.h @@ -49,7 +49,7 @@ namespace py = pybind11; #endif -namespace mindspore::dataset { +namespace mindspore::lite::dataset { class Tensor; template class Allocator; @@ -909,5 +909,5 @@ inline Status Tensor::CreateScalar(const std::string &item, TensorP RETURN_UNEXPECTED_IF_NULL(out); return CreateFromVector({item}, TensorShape::CreateScalar(), DataType(DataType::DE_STRING), out); } -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_TENSOR_H_ diff --git a/mindspore-lite/minddata/dataset/core/tensor_helpers.cc b/mindspore-lite/minddata/dataset/core/tensor_helpers.cc index da327c72..02418048 100644 --- a/mindspore-lite/minddata/dataset/core/tensor_helpers.cc +++ b/mindspore-lite/minddata/dataset/core/tensor_helpers.cc @@ -19,7 +19,7 @@ #include "include/dataset/constants.h" #include "include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { void IndexGeneratorHelper(int8_t depth, std::vector *numbers, @@ -78,4 +78,4 @@ std::vector> IndexGenerator(const std::vector *numbers, const std /// \return std::vector> 2D vector of generated indices, M x (slice_list).size() std::vector> IndexGenerator(const std::vector &slice_list); } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_TENSOR_HELPERS_H_ diff --git a/mindspore-lite/minddata/dataset/core/tensor_row.cc b/mindspore-lite/minddata/dataset/core/tensor_row.cc index 19bce776..d3af2e09 100644 --- a/mindspore-lite/minddata/dataset/core/tensor_row.cc +++ b/mindspore-lite/minddata/dataset/core/tensor_row.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/core/config_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { TensorRow::TensorRow() noexcept @@ -242,4 +242,4 @@ std::string RowTimer::Summary(const std::vector &specified_op) { return ss.str(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/core/tensor_row.h b/mindspore-lite/minddata/dataset/core/tensor_row.h index 5c87cdd5..8db0ce10 100644 --- a/mindspore-lite/minddata/dataset/core/tensor_row.h +++ b/mindspore-lite/minddata/dataset/core/tensor_row.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class TensorRow; // A set of Tensor pointers with an id @@ -341,5 +341,5 @@ class TensorRow { static Status ValidateTensorRow(const TensorRow &input, const DataType &data_type); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_TENSOR_ROW_H_ diff --git a/mindspore-lite/minddata/dataset/core/tensor_shape.cc 
b/mindspore-lite/minddata/dataset/core/tensor_shape.cc index f52d184f..40866340 100644 --- a/mindspore-lite/minddata/dataset/core/tensor_shape.cc +++ b/mindspore-lite/minddata/dataset/core/tensor_shape.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/include/dataset/constants.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr dsize_t TensorShape::kDimUnknown; @@ -247,4 +247,4 @@ Status TensorShape::ToFlatIndex(const std::vector &index, dsize_t *flat return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/core/tensor_shape.h b/mindspore-lite/minddata/dataset/core/tensor_shape.h index 8f85d7d8..c5b377c5 100644 --- a/mindspore-lite/minddata/dataset/core/tensor_shape.h +++ b/mindspore-lite/minddata/dataset/core/tensor_shape.h @@ -37,7 +37,7 @@ namespace py = pybind11; #include "mindspore-lite/minddata/dataset/core/global_context.h" #include "mindspore-lite/minddata/dataset/util/allocator.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Class that represents a shape of a Tensor. A shape can be: // -# Known shape (mKnown = true) @@ -214,5 +214,5 @@ class DATASET_API TensorShape { void AddListToShape(const T &list); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CORE_TENSOR_SHAPE_H_ diff --git a/mindspore-lite/minddata/dataset/core/type_id.h b/mindspore-lite/minddata/dataset/core/type_id.h index a3f3c4ed..a8a912a5 100644 --- a/mindspore-lite/minddata/dataset/core/type_id.h +++ b/mindspore-lite/minddata/dataset/core/type_id.h @@ -19,7 +19,7 @@ #include "ir/dtype/type_id.h" #include "mindspore-lite/minddata/dataset/core/data_type.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { inline dataset::DataType MSTypeToDEType(const TypeId data_type) { switch (data_type) { @@ -87,6 +87,6 @@ inline TypeId DETypeToMSType(dataset::DataType data_type) { } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_TYPEID_H_ diff --git a/mindspore-lite/minddata/dataset/core/types.cc b/mindspore-lite/minddata/dataset/core/types.cc index ffcc564d..57db8c47 100644 --- a/mindspore-lite/minddata/dataset/core/types.cc +++ b/mindspore-lite/minddata/dataset/core/types.cc @@ -26,7 +26,7 @@ #include "utils/convert_utils_base.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { class Buffer::Impl { public: Impl() : data_() {} @@ -523,4 +523,4 @@ bool Buffer::SetData(const void *data, size_t data_len) { } std::vector CharVersion() { return {}; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/connector.h b/mindspore-lite/minddata/dataset/engine/connector.h index 1f45c4c1..3238080b 100644 --- a/mindspore-lite/minddata/dataset/engine/connector.h +++ b/mindspore-lite/minddata/dataset/engine/connector.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/services.h" #include "mindspore-lite/minddata/dataset/util/cond_var.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Connector is a communication data structure between two group of threads that // preserve the order. 
@@ -206,6 +206,6 @@ class Connector { std::atomic out_buffers_count_ = 0; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_CONNECTOR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/consumers/pull_based_tree_consumer.cc b/mindspore-lite/minddata/dataset/engine/consumers/pull_based_tree_consumer.cc index 384cd459..0d1f9fb0 100644 --- a/mindspore-lite/minddata/dataset/engine/consumers/pull_based_tree_consumer.cc +++ b/mindspore-lite/minddata/dataset/engine/consumers/pull_based_tree_consumer.cc @@ -18,7 +18,7 @@ #include -namespace mindspore::dataset { +namespace mindspore::lite::dataset { Status PullBasedIteratorConsumer::Init(const std::shared_ptr &root) { return tree_adapter_lite_->Compile(root, num_epochs_); } @@ -258,4 +258,4 @@ Status TreeGetters::GetFirstRowShapeAndType() { first_row_obtained_ = true; return Status::OK(); } -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset diff --git a/mindspore-lite/minddata/dataset/engine/consumers/pull_based_tree_consumer.h b/mindspore-lite/minddata/dataset/engine/consumers/pull_based_tree_consumer.h index c5304766..ec99efaa 100644 --- a/mindspore-lite/minddata/dataset/engine/consumers/pull_based_tree_consumer.h +++ b/mindspore-lite/minddata/dataset/engine/consumers/pull_based_tree_consumer.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/consumers/tree_consumer.h" #include "mindspore-lite/minddata/dataset/engine/tree_adapter_lite.h" -namespace mindspore::dataset { +namespace mindspore::lite::dataset { class TreeAdapterLite; class TensorRow; @@ -122,5 +122,5 @@ class TreeGetters : public PullBasedIteratorConsumer { Status InternalInit(); }; -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_CONSUMERS_PULL_BASED_TREE_CONSUMER_H_ diff --git a/mindspore-lite/minddata/dataset/engine/consumers/tree_consumer.cc b/mindspore-lite/minddata/dataset/engine/consumers/tree_consumer.cc index 66d7d532..8fb20cdf 100644 --- a/mindspore-lite/minddata/dataset/engine/consumers/tree_consumer.cc +++ b/mindspore-lite/minddata/dataset/engine/consumers/tree_consumer.cc @@ -38,7 +38,7 @@ #include "utils/ms_context.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { using ProfilingRegistrationState = ProfilingManager::ProfilingRegistrationState; // TreeConsumer @@ -948,4 +948,4 @@ Status DatasetSizeGetter::Terminate() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/consumers/tree_consumer.h b/mindspore-lite/minddata/dataset/engine/consumers/tree_consumer.h index 502eb6b4..ea8d6a23 100644 --- a/mindspore-lite/minddata/dataset/engine/consumers/tree_consumer.h +++ b/mindspore-lite/minddata/dataset/engine/consumers/tree_consumer.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/tree_adapter.h" #include "mindspore-lite/minddata/dataset/include/dataset/text.h" -namespace mindspore::dataset { +namespace mindspore::lite::dataset { // Forward declare class TreeAdapter; class DatasetNode; @@ -310,5 +310,5 @@ class BuildVocabConsumer : public TreeConsumer { /// \return string std::string Name() override { return "BuildVocab"; } }; -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_CONSUMERS_TREE_CONSUMER_H_ diff --git 
a/mindspore-lite/minddata/dataset/engine/data_schema.cc b/mindspore-lite/minddata/dataset/engine/data_schema.cc index 59438ba0..42730ae9 100644 --- a/mindspore-lite/minddata/dataset/engine/data_schema.cc +++ b/mindspore-lite/minddata/dataset/engine/data_schema.cc @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor_shape.h" #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // A macro for converting an input string representing the column type to it's actual // numeric column type. @@ -488,4 +488,4 @@ Status DataSchema::GetColumnName(std::vector *column_names) const { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/data_schema.h b/mindspore-lite/minddata/dataset/engine/data_schema.h index 59e5dcfd..cffd265a 100644 --- a/mindspore-lite/minddata/dataset/engine/data_schema.h +++ b/mindspore-lite/minddata/dataset/engine/data_schema.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor_shape.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class ColDescriptor data_schema.h /// \brief A simple class to provide meta info about a column. @@ -212,6 +212,6 @@ class DataSchema { int64_t num_rows_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATA_SCHEMA_H_ diff --git a/mindspore-lite/minddata/dataset/engine/dataset_iterator.cc b/mindspore-lite/minddata/dataset/engine/dataset_iterator.cc index 08e19165..1b54c3bd 100644 --- a/mindspore-lite/minddata/dataset/engine/dataset_iterator.cc +++ b/mindspore-lite/minddata/dataset/engine/dataset_iterator.cc @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.h" #include "mindspore-lite/minddata/dataset/engine/perf/profiling.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Fetches one row of data from the iterator as a column map. 
Status DatasetIterator::GetNextAsMap(TensorMap *out_map) { @@ -223,4 +223,4 @@ std::unordered_map ChildIterator::GetColumnNameMap() const return current_op_->child(child_idx_)->column_name_id_map(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/dataset_iterator.h b/mindspore-lite/minddata/dataset/engine/dataset_iterator.h index 40fee7e9..1e116e25 100644 --- a/mindspore-lite/minddata/dataset/engine/dataset_iterator.h +++ b/mindspore-lite/minddata/dataset/engine/dataset_iterator.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/engine/perf/dataset_iterator_tracing.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { using TensorMap = std::unordered_map>; @@ -122,6 +122,6 @@ class ChildIterator { bool eof_handled_; // T/F if this op got an eof }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASET_ITERATOR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/barrier_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/barrier_op.cc index a6e72611..28114885 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/barrier_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/barrier_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/constants.h" #include "mindspore-lite/minddata/dataset/core/config_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Construct BarrierOp here, local variables initialized in operator due to tree construction restrictions BarrierOp::BarrierOp(int32_t op_connector_size, const std::string &condition_name, py::function condition_func) @@ -157,4 +157,4 @@ Status BarrierOp::EoeReceived(int32_t) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/barrier_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/barrier_op.h index 6f18b685..296836d0 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/barrier_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/barrier_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declare class ExecutionTree; @@ -94,6 +94,6 @@ class BarrierOp : public PipelineOp { py::function condition_function_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_BARRIER_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/batch_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/batch_op.cc index 10210c3b..b3f905c0 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/batch_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/batch_op.cc @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { #ifdef ENABLE_PYTHON BatchOp::BatchOp(int32_t batch_size, bool drop, bool pad, int32_t op_queue_size, int32_t num_workers, @@ -1018,4 +1018,4 @@ std::vector BatchOp::GetMPWorkerPIDs() const { 
return DatasetOp::GetMPWorkerPIDs(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/batch_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/batch_op.h index fb44999e..0c0a654f 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/batch_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/batch_op.h @@ -33,7 +33,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { using PadInfo = std::map>>; @@ -257,6 +257,6 @@ class BatchOp : public ParallelOp, CBatc bool eoe_received_ = false; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_BATCH_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.cc index 0ca2d159..f6abe21e 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.cc @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" namespace py = pybind11; -namespace mindspore { +namespace mindspore::lite { namespace dataset { BucketBatchByLengthOp::BucketBatchByLengthOp(const std::vector &length_dependent_columns, const std::vector &bucket_boundaries, @@ -238,4 +238,4 @@ Status BucketBatchByLengthOp::GetNextRowPullMode(TensorRow *const row) { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.h index b1b3d56d..6ec8acf7 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.h @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class BucketBatchByLengthOp : public PipelineOp { @@ -95,6 +95,6 @@ class BucketBatchByLengthOp : public PipelineOp { std::vector> buckets_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_BUCKET_BATCH_BY_LENGTH_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.cc index 41d0df7e..04a54302 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/core/config_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { BuildSentencePieceVocabOp::BuildSentencePieceVocabOp(std::shared_ptr vocab, const std::vector col_names, int32_t vocab_size, @@ -185,4 +185,4 @@ void BuildSentencePieceVocabOp::DatasetSentenceIterator::Next() { s_p_vocab_ptr_->Next(&value_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.h index 951dfa4a..0afce7e8 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/build_sentence_piece_vocab_op.h @@ -33,7 +33,7 @@ #include "mindspore-lite/minddata/dataset/util/queue.h" #include "pybind11/pybind11.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace py = pybind11; @@ -96,5 +96,5 @@ class BuildSentencePieceVocabOp : public PipelineOp { std::unique_ptr> sentence_queue_; // master thread assigns each worker TensorRow via this }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // DATASET_ENGINE_DATASETOPS_BUILD_SENTENCE_VOCAB_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/build_vocab_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/build_vocab_op.cc index 2fd8a44e..e2d450d3 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/build_vocab_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/build_vocab_op.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/core/config_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { BuildVocabOp::BuildVocabOp(std::shared_ptr vocab, std::vector col_names, std::pair freq_r, int64_t top_k, const std::vector &tokens, @@ -213,4 +213,4 @@ void BuildVocabOp::Print(std::ostream &out, bool show_all) const { } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/build_vocab_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/build_vocab_op.h index fd8e1cac..9c47cd37 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/build_vocab_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/build_vocab_op.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/util/queue.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class BuildVocabOp : public ParallelOp { public: @@ -86,5 +86,5 @@ class BuildVocabOp : public ParallelOp { std::unordered_map word_cnt_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_BUILD_VOCAB_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/cache_base_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/cache_base_op.cc index d78b6d76..fa7564c0 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/cache_base_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/cache_base_op.cc @@ -17,7 +17,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // A print method typically used for debugging void CacheBase::Print(std::ostream &out, bool show_all) const { @@ -321,4 +321,4 @@ Status CacheBase::Prefetcher(int32_t worker_id) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/cache_base_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/cache_base_op.h index 5f7fb8f4..9b5adf40 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/cache_base_op.h +++ 
b/mindspore-lite/minddata/dataset/engine/datasetops/cache_base_op.h @@ -32,7 +32,7 @@ #include "mindspore-lite/minddata/dataset/util/queue_map.h" #include "mindspore-lite/minddata/dataset/util/semaphore.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief This is the base class for CacheOp and CacheLookupOp which share many similarities. /// \see CacheOp @@ -117,6 +117,6 @@ class CacheBase : public ParallelOp, TensorRow> { Status GetPrefetchRow(row_id_type row_id, TensorRow *out); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_CACHE_BASE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/cache_lookup_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/cache_lookup_op.cc index ba9370a4..c7a7d0dc 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/cache_lookup_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/cache_lookup_op.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status CacheLookupOp::operator()() { if (!sampler_) { @@ -95,4 +95,4 @@ Status CacheLookupOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/cache_lookup_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/cache_lookup_op.h index a636d9b1..5c3fb58c 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/cache_lookup_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/cache_lookup_op.h @@ -23,7 +23,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/datasetops/cache_base_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief provides a memory/disk cache that acts as a save-point within a mappable dataset. 
/// \note For non-mappable dataset, please see CacheOp @@ -59,6 +59,6 @@ class CacheLookupOp : public CacheBase, public SamplerRT { Status RegisterResources() override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_CACHE_LOOKUP_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/cache_merge_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/cache_merge_op.cc index 640050c5..eac77cc3 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/cache_merge_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/cache_merge_op.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/util/system_pool.h" #include "mindspore-lite/minddata/dataset/util/task_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { CacheMergeOp::~CacheMergeOp() = default; void CacheMergeOp::Print(std::ostream &out, bool show_all) const { @@ -319,4 +319,4 @@ Status CacheMergeOp::TensorRowCacheRequest::CheckCacheResult() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/cache_merge_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/cache_merge_op.h index 3ba8b004..c111b35a 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/cache_merge_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/cache_merge_op.h @@ -32,7 +32,7 @@ #include "mindspore-lite/minddata/dataset/util/queue_map.h" #include "mindspore-lite/minddata/dataset/util/semaphore.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Provides method to merge two streams (one from CacheLookup and one from cache miss stream) into one single /// stream @@ -146,5 +146,5 @@ class CacheMergeOp : public ParallelOp { Status Cleaner(); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_CACHE_MERGE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/cache_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/cache_op.cc index 8c433f96..13829a27 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/cache_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/cache_op.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/util/task_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor of CacheOp CacheOp::CacheOp(int32_t num_workers, int32_t op_connector_size, std::shared_ptr cache_client, @@ -217,4 +217,4 @@ Status CacheOp::PrepareOperator() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/cache_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/cache_op.h index 169d0419..aa07d942 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/cache_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/cache_op.h @@ -22,7 +22,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/datasetops/cache_base_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief CacheOp provides a memory/disk cache that acts as a save-point within a non-mappable dataset. /// \note For mappable dataset, please see CacheLookupOp. 
@@ -94,6 +94,6 @@ class CacheOp : public CacheBase, public RandomAccessOp { Status RegisterResources() override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_CACHE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/concat_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/concat_op.cc index c10d889f..7b0dee62 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/concat_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/concat_op.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor of the ConcatOp. ConcatOp::ConcatOp(const std::shared_ptr &sampler, @@ -321,4 +321,4 @@ Status ConcatOp::GetNextRowPullMode(TensorRow *const row) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/concat_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/concat_op.h index c1a82ce3..9ddb0a0a 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/concat_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/concat_op.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/distributed_sampler.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/random_sampler.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ConcatOp : public PipelineOp { public: @@ -117,6 +117,6 @@ class ConcatOp : public PipelineOp { std::mt19937 rnd_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_CONCAT_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/data_queue_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/data_queue_op.cc index aed4248e..ae025c02 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/data_queue_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/data_queue_op.cc @@ -32,7 +32,7 @@ #include "include/backend/distributed/embedding_cache/data_queue_manager.h" #include "utils/ms_context.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { #ifdef WITH_BACKEND using distributed::DataQueueManager; @@ -1311,4 +1311,4 @@ Status DataQueueOp::SendDataToAscendDynamic() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/data_queue_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/data_queue_op.h index 21719c0d..e09bf0ff 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/data_queue_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/data_queue_op.h @@ -35,7 +35,7 @@ #include "mindspore-lite/minddata/dataset/util/circular_pool.h" #include "mindspore/ccsrc/include/runtime/hardware_abstract/data_queue/data_queue.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class GpuConnector; using DATA_INFO = std::vector>; @@ -226,5 +226,5 @@ class DataQueueOp : public PipelineOp { bool enable_prefetch_cache_pipeline_{false}; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_DATA_QUEUE_OP_H_ diff --git 
a/mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.cc index 78f395b7..15047c45 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.cc @@ -33,7 +33,7 @@ #include "utils/system/crc32c.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor DatasetOp::DatasetOp(int32_t op_connector_size, std::shared_ptr sampler) @@ -510,4 +510,4 @@ Status DatasetOp::Launch() { Status DatasetOp::Terminate() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.h index 8d69a0f1..5521a66a 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/engine/perf/info_collector.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr char kBarrierOp[] = "BarrierOp"; @@ -429,6 +429,6 @@ class DatasetOp : public std::enable_shared_from_this { void set_tree(ExecutionTree *tree) { tree_ = tree; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_DATASET_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/epoch_ctrl_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/epoch_ctrl_op.cc index ea6c212a..519f8c98 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/epoch_ctrl_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/epoch_ctrl_op.cc @@ -18,7 +18,7 @@ #include #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor EpochCtrlOp::EpochCtrlOp(int32_t num_epoch) : RepeatOp(num_epoch) { MS_LOG(INFO) << "Welcome to Epoch Ctrl Op."; } @@ -111,4 +111,4 @@ Status EpochCtrlOp::GetNextRowPullMode(TensorRow *const row) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/epoch_ctrl_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/epoch_ctrl_op.h index f57174d0..501ee111 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/epoch_ctrl_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/epoch_ctrl_op.h @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/repeat_op.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class EpochCtrlOp : public RepeatOp { public: @@ -63,6 +63,6 @@ class EpochCtrlOp : public RepeatOp { ImplementedPullMode PullModeImplementationStatus() const override { return ImplementedPullMode::Implemented; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // DATASET_ENGINE_DATASETOPS_EPOCH_CTRL_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/filter_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/filter_op.cc index 2fb0329c..528f0dcc 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/filter_op.cc +++ 
b/mindspore-lite/minddata/dataset/engine/datasetops/filter_op.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/util/task_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { FilterOp::FilterOp(const std::vector &in_col_names, int32_t num_workers, int32_t op_queue_size, std::shared_ptr predicate_func) @@ -198,4 +198,4 @@ Status FilterOp::GetNextRowPullMode(TensorRow *const row) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/filter_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/filter_op.h index 15f554e6..3f6715f0 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/filter_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/filter_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/queue.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { enum filterCtrl : int8_t { kFilterEmpty = 0, kFilterPartial = 1, kFilterFull = 2, kFilterEoe = 3, kFilterEof = 4 }; @@ -119,5 +119,5 @@ class FilterOp : public ParallelOp { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/cpu_map_job.cc b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/cpu_map_job.cc index 8715e013..2c10c5d0 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/cpu_map_job.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/cpu_map_job.cc @@ -19,7 +19,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/datasetops/map_op/cpu_map_job.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor @@ -56,4 +56,4 @@ Status CpuMapJob::Run(std::vector in, std::vector *out) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/cpu_map_job.h b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/cpu_map_job.h index 3814a9e9..96029699 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/cpu_map_job.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/cpu_map_job.h @@ -20,7 +20,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_job.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CpuMapJob : public MapJob { public: @@ -48,6 +48,6 @@ class CpuMapJob : public MapJob { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // DATASET_ENGINE_DATASETOPS_MAP_OP_CPU_MAP_JOB_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/gpu_map_job.cc b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/gpu_map_job.cc index 2bcd3e72..5c685164 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/gpu_map_job.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/gpu_map_job.cc @@ -16,7 +16,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/map_op/gpu_map_job.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor @@ -25,4 +25,4 @@ GpuMapJob::GpuMapJob(std::vector> operations) : MapJob // Destructor GpuMapJob::~GpuMapJob() = default; } // namespace dataset -} // namespace 
mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/gpu_map_job.h b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/gpu_map_job.h index 93262fb8..da835d61 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/gpu_map_job.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/gpu_map_job.h @@ -20,7 +20,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_job.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class GpuMapJob : public MapJob { public: @@ -45,6 +45,6 @@ class GpuMapJob : public MapJob { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // DATASET_ENGINE_DATASETOPS_MAP_OP_GPU_MAP_JOB_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_job.h b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_job.h index 92d98962..b47fdbea 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_job.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_job.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace util { static inline Status RebuildMapErrorMsg(const TensorRow &input_row, const std::string &op_name, Status *rc) { @@ -92,6 +92,6 @@ class MapJob { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // DATASET_ENGINE_DATASETOPS_MAP_OP_MAP_JOB_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_op.cc index 3cddbb89..48b3cb76 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_op.cc @@ -41,7 +41,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { using TensorOpVector = std::vector>; @@ -920,4 +920,4 @@ Status MapOp::ReleaseResource(int32_t worker_id) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_op.h index 131d2ee6..82a71137 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_op.h @@ -32,7 +32,7 @@ #include "mindspore-lite/minddata/dataset/util/queue.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declare class ExecutionTree; @@ -251,6 +251,6 @@ class MapOp : public ParallelOp, TensorRow> { Status ReleaseResource(int32_t worker_id); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_MAP_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/npu_map_job.cc b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/npu_map_job.cc index 0859fde8..0b90b352 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/npu_map_job.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/npu_map_job.cc @@ -22,7 +22,7 @@ 
#include "mindspore-lite/minddata/dataset/core/device_tensor_ascend910b.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor @@ -149,4 +149,4 @@ Status NpuMapJob::Run(std::vector in, std::vector *out, return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/npu_map_job.h b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/npu_map_job.h index 5ecc5d02..a2cb3e9f 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/map_op/npu_map_job.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/map_op/npu_map_job.h @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/map_op/map_job.h" #include "runtime/hardware_abstract/device_context/device_context.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class NpuMapJob : public MapJob { public: @@ -47,6 +47,6 @@ class NpuMapJob : public MapJob { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // DATASET_ENGINE_DATASETOPS_MAP_OP_NPU_MAP_JOB_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h index 3b1dee09..64b72f55 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr int64_t kCachedRowsSize = 16; @@ -463,6 +463,6 @@ class ParallelOp : public DatasetOp { int32_t current_repeats_{0}; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_PARALLEL_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.cc index 002bcb01..27084983 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.cc @@ -16,7 +16,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h" #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor PipelineOp::PipelineOp(int32_t op_connector_size, std::shared_ptr sampler) @@ -46,4 +46,4 @@ void PipelineOp::Print(std::ostream &out, bool show_all) const { } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h index 391c1cb3..a2c26bd1 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h @@ -21,7 +21,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // forward declare class ExecutionTree; @@ -67,6 +67,6 @@ class PipelineOp : public DatasetOp { // trash. 
}; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_PIPELINE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/project_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/project_op.cc index 9345959c..58b208ef 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/project_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/project_op.cc @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { ProjectOp::ProjectOp(const std::vector &columns_to_project) : PipelineOp(0), columns_to_project_(columns_to_project) {} @@ -127,4 +127,4 @@ Status ProjectOp::GetNextRowPullMode(TensorRow *const row) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/project_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/project_op.h index bf93784a..3c4d7d98 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/project_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/project_op.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ProjectOp : public PipelineOp { public: @@ -96,6 +96,6 @@ class ProjectOp : public PipelineOp { Status ComputeColMap() override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_PROJECT_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/receive_bridge_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/receive_bridge_op.cc index bdc2f6ab..b96d5cb3 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/receive_bridge_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/receive_bridge_op.cc @@ -31,7 +31,7 @@ #include "mindspore-lite/minddata/dataset/util/task_manager.h" #include "mindspore-lite/minddata/dataset/util/monitor.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor of ReceiveBridgeOp ReceiveBridgeOp::ReceiveBridgeOp(int32_t op_connector_size, SharedMemoryQueue receive_queue, MessageQueue msg_queue) @@ -317,4 +317,4 @@ Status ReceiveBridgeOp::GetNextRowPullMode(TensorRow *const row) { MessageQueue ReceiveBridgeOp::GetMessageQueue() { return msg_queue_; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/receive_bridge_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/receive_bridge_op.h index 91d71b5b..ce3c55e6 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/receive_bridge_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/receive_bridge_op.h @@ -32,7 +32,7 @@ #include "mindspore-lite/minddata/dataset/core/shared_memory_queue.h" #include "mindspore-lite/minddata/dataset/core/message_queue.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const int kMonitorInterval = 1; const int kSleepDelays = 2; @@ -125,6 +125,6 @@ class ReceiveBridgeOp : public ParallelOp { ImplementedPullMode PullModeImplementationStatus() const override { return ImplementedPullMode::Implemented; } }; } // namespace dataset -} // namespace 
mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_RECEIVE_BRIDGE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/rename_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/rename_op.cc index 30187e8b..a8d90a46 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/rename_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/rename_op.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/core/config_manager.h" #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // constructor RenameOp::RenameOp(const std::vector &in_col_names, const std::vector &out_col_names) @@ -143,4 +143,4 @@ void RenameOp::Print(std::ostream &out, // In: The output stream to print t } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/rename_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/rename_op.h index 2683ddd0..5f6bff90 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/rename_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/rename_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RenameOp : public PipelineOp { public: @@ -89,6 +89,6 @@ class RenameOp : public PipelineOp { std::unique_ptr child_iterator_; // An iterator for fetching. }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_RENAME_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/repeat_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/repeat_op.cc index eabf12d6..a8d49f44 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/repeat_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/repeat_op.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor of the RepeatOp. 
RepeatOp::RepeatOp(int32_t count) : PipelineOp(0), num_repeats_(count), repeat_count_(0) {} @@ -161,4 +161,4 @@ Status RepeatOp::GetNextRowPullMode(TensorRow *const row) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/repeat_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/repeat_op.h index 26a3e9d8..f2cab1e3 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/repeat_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/repeat_op.h @@ -22,7 +22,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RepeatOp : public PipelineOp { public: @@ -114,6 +114,6 @@ class RepeatOp : public PipelineOp { ImplementedPullMode PullModeImplementationStatus() const override { return ImplementedPullMode::Implemented; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_REPEAT_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/send_bridge_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/send_bridge_op.cc index 74fbc1ec..d478d67e 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/send_bridge_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/send_bridge_op.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/task_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor of SendBridgeOp SendBridgeOp::SendBridgeOp(int32_t op_connector_size, SharedMemoryQueue send_queue, MessageQueue msg_queue) @@ -224,4 +224,4 @@ MessageQueue SendBridgeOp::GetMessageQueue() { return msg_queue_; } SharedMemoryQueue SendBridgeOp::GetSharedMemoryQueue() { return send_queue_; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/send_bridge_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/send_bridge_op.h index 78767133..28fe1172 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/send_bridge_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/send_bridge_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/core/shared_memory_queue.h" #include "mindspore-lite/minddata/dataset/core/message_queue.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SendBridgeOp : public ParallelOp { public: @@ -114,6 +114,6 @@ class SendBridgeOp : public ParallelOp { ImplementedPullMode PullModeImplementationStatus() const override { return ImplementedPullMode::Implemented; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SEND_BRIDGE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/shuffle_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/shuffle_op.cc index aab017ba..7372740e 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/shuffle_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/shuffle_op.cc @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr int32_t 
ShuffleOp::kShuffleStateInit; constexpr int32_t ShuffleOp::kShuffleStateActive; @@ -298,4 +298,4 @@ Status ShuffleOp::GetNextRowPullMode(TensorRow *const row) { void ShuffleOp::Skip(int64_t skip_steps) { rng_.discard(skip_steps); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/shuffle_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/shuffle_op.h index 63850e08..79a383b3 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/shuffle_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/shuffle_op.h @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ShuffleOp : public PipelineOp { @@ -144,6 +144,6 @@ class ShuffleOp : public PipelineOp { bool eof_received_{false}; // flag to indicate if eof is reached in pull mode. }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SHUFFLE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/skip_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/skip_op.cc index 695c12f7..860efefa 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/skip_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/skip_op.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/core/config_manager.h" #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor of the SkipOp. SkipOp::SkipOp(int32_t count) : PipelineOp(0), max_skips_(count), skip_count_(0) {} @@ -122,4 +122,4 @@ Status SkipOp::GetNextRowPullMode(TensorRow *const row) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/skip_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/skip_op.h index 2c5ad67f..0911d8fd 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/skip_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/skip_op.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h" #include "mindspore-lite/minddata/dataset/engine/dataset_iterator.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SkipOp : public PipelineOp { public: @@ -72,6 +72,6 @@ class SkipOp : public PipelineOp { std::unique_ptr child_iterator_; // An iterator for fetching. }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SKIP_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/ag_news_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/ag_news_op.cc index ccde88e8..7bced607 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/ag_news_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/ag_news_op.cc @@ -1,55 +1,55 @@ -/** - * Copyright 2021-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/ag_news_op.h" - -#include - -#include "mindspore-lite/minddata/dataset/core/config_manager.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -#include "mindspore-lite/minddata/dataset/engine/execution_tree.h" -#include "mindspore-lite/minddata/dataset/engine/jagged_connector.h" - -namespace mindspore { -namespace dataset { -AGNewsOp::AGNewsOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, int32_t op_connector_size, - bool shuffle_files, int32_t num_devices, int32_t device_id, char field_delim, - const std::vector> &column_default, - const std::vector &column_name, const std::vector &ag_news_list) - : CsvOp(ag_news_list, field_delim, column_default, column_name, num_workers, num_samples, worker_connector_size, - op_connector_size, shuffle_files, num_devices, device_id) {} - -// A print method typically used for debugging. -void AGNewsOp::Print(std::ostream &out, bool show_all) const { - if (!show_all) { - // Call the super class for displaying any common 1-liner info. - ParallelOp::Print(out, show_all); - // Then show any custom derived-internal 1-liner info for this op. - out << "\n"; - } else { - // Call the super class for displaying any common detailed info. - ParallelOp::Print(out, show_all); - // Then show any custom derived-internal stuff - out << "\nSample count: " << total_rows_ << "\nDevice id: " << device_id_ << "\nNumber of devices: " << num_devices_ - << "\nShuffle files: " << ((shuffle_files_) ? "yes" : "no") << "\nAGNews files list:\n"; - for (int i = 0; i < csv_files_list_.size(); ++i) { - out << " " << csv_files_list_[i]; - } - out << "\n\n"; - } -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/ag_news_op.h" + +#include + +#include "mindspore-lite/minddata/dataset/core/config_manager.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" +#include "mindspore-lite/minddata/dataset/engine/execution_tree.h" +#include "mindspore-lite/minddata/dataset/engine/jagged_connector.h" + +namespace mindspore::lite { +namespace dataset { +AGNewsOp::AGNewsOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, int32_t op_connector_size, + bool shuffle_files, int32_t num_devices, int32_t device_id, char field_delim, + const std::vector> &column_default, + const std::vector &column_name, const std::vector &ag_news_list) + : CsvOp(ag_news_list, field_delim, column_default, column_name, num_workers, num_samples, worker_connector_size, + op_connector_size, shuffle_files, num_devices, device_id) {} + +// A print method typically used for debugging. +void AGNewsOp::Print(std::ostream &out, bool show_all) const { + if (!show_all) { + // Call the super class for displaying any common 1-liner info. + ParallelOp::Print(out, show_all); + // Then show any custom derived-internal 1-liner info for this op. + out << "\n"; + } else { + // Call the super class for displaying any common detailed info. + ParallelOp::Print(out, show_all); + // Then show any custom derived-internal stuff + out << "\nSample count: " << total_rows_ << "\nDevice id: " << device_id_ << "\nNumber of devices: " << num_devices_ + << "\nShuffle files: " << ((shuffle_files_) ? "yes" : "no") << "\nAGNews files list:\n"; + for (int i = 0; i < csv_files_list_.size(); ++i) { + out << " " << csv_files_list_[i]; + } + out << "\n\n"; + } +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/ag_news_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/ag_news_op.h index 662d0e1f..4118f7ad 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/ag_news_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/ag_news_op.h @@ -1,77 +1,77 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_AG_NEWS_OP_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_AG_NEWS_OP_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -#include "mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache.h" -#include "mindspore-lite/minddata/dataset/engine/jagged_connector.h" -#include "mindspore-lite/minddata/dataset/util/auto_index.h" - -namespace mindspore { -namespace dataset { -class JaggedConnector; - -class AGNewsOp : public CsvOp { - public: - /// \brief Constructor. - /// \param[in] num_workers Number of workers reading images in parallel - /// \param[in] num_samples The number of samples to be included in the dataset. - /// (Default = 0 means all samples). - /// \param[in] worker_connector_size Size of each internal queue. - /// \param[in] op_connector_size Size of each queue in the connector that the child operator pulls from. - /// \param[in] shuffle_files Whether or not to shuffle the files before reading data. - /// \param[in] num_devices Number of devices that the dataset should be divided into. (Default = 1) - /// \param[in] device_id The device ID within num_devices. This argument should be - /// specified only when num_devices is also specified (Default = 0). - /// \param[in] field_delim A char that indicates the delimiter to separate fields (default=','). - /// \param[in] column_default List of default values for the CSV field (default={}). Each item in the list is - /// either a valid type (float, int, or string). If this is not provided, treats all columns as string type. - /// \param[in] column_name List of column names of the dataset (default={}). If this is not provided, infers the - /// column_names from the first row of CSV file. - /// \param[in] ag_news_list List of files to be read to search for a pattern of files. The list - /// will be sorted in a lexicographical order. - AGNewsOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, int32_t op_connector_size, - bool shuffle_files, int32_t num_devices, int32_t device_id, char field_delim, - const std::vector> &column_default, const std::vector &column_name, - const std::vector &ag_news_list); - - /// \brief Default destructor. - ~AGNewsOp() = default; - - /// \brief A print method typically used for debugging. - /// \param[in] out he output stream to write output to. - /// \param[in] show_all A bool to control if you want to show all info or just a - /// summary. - void Print(std::ostream &out, bool show_all) const override; - - /// \brief Op name getter. - /// \return Name of the current Op. - std::string Name() const override { return "AGNewsOp"; } - - // DatasetName name getter - // \return DatasetName of the current Op - std::string DatasetName(bool upper = false) const { return upper ? "AGNews" : "ag news"; } -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_AG_NEWS_OP_H_ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_AG_NEWS_OP_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_AG_NEWS_OP_H_ + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" +#include "mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache.h" +#include "mindspore-lite/minddata/dataset/engine/jagged_connector.h" +#include "mindspore-lite/minddata/dataset/util/auto_index.h" + +namespace mindspore::lite { +namespace dataset { +class JaggedConnector; + +class AGNewsOp : public CsvOp { + public: + /// \brief Constructor. + /// \param[in] num_workers Number of workers reading images in parallel + /// \param[in] num_samples The number of samples to be included in the dataset. + /// (Default = 0 means all samples). + /// \param[in] worker_connector_size Size of each internal queue. + /// \param[in] op_connector_size Size of each queue in the connector that the child operator pulls from. + /// \param[in] shuffle_files Whether or not to shuffle the files before reading data. + /// \param[in] num_devices Number of devices that the dataset should be divided into. (Default = 1) + /// \param[in] device_id The device ID within num_devices. This argument should be + /// specified only when num_devices is also specified (Default = 0). + /// \param[in] field_delim A char that indicates the delimiter to separate fields (default=','). + /// \param[in] column_default List of default values for the CSV field (default={}). Each item in the list is + /// either a valid type (float, int, or string). If this is not provided, treats all columns as string type. + /// \param[in] column_name List of column names of the dataset (default={}). If this is not provided, infers the + /// column_names from the first row of CSV file. + /// \param[in] ag_news_list List of files to be read to search for a pattern of files. The list + /// will be sorted in a lexicographical order. + AGNewsOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, int32_t op_connector_size, + bool shuffle_files, int32_t num_devices, int32_t device_id, char field_delim, + const std::vector> &column_default, const std::vector &column_name, + const std::vector &ag_news_list); + + /// \brief Default destructor. + ~AGNewsOp() = default; + + /// \brief A print method typically used for debugging. + /// \param[in] out he output stream to write output to. + /// \param[in] show_all A bool to control if you want to show all info or just a + /// summary. + void Print(std::ostream &out, bool show_all) const override; + + /// \brief Op name getter. + /// \return Name of the current Op. + std::string Name() const override { return "AGNewsOp"; } + + // DatasetName name getter + // \return DatasetName of the current Op + std::string DatasetName(bool upper = false) const { return upper ? 
"AGNews" : "ag news"; } +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_AG_NEWS_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/album_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/album_op.cc index ca7854e3..7d46a093 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/album_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/album_op.cc @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { AlbumOp::AlbumOp(int32_t num_wkrs, std::string file_dir, int32_t queue_size, bool do_decode, const std::set &exts, std::unique_ptr data_schema, @@ -437,4 +437,4 @@ Status AlbumOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/album_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/album_op.h index aff11081..0e93eade 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/album_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/album_op.h @@ -38,7 +38,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declares template @@ -174,5 +174,5 @@ class AlbumOp : public MappableLeafOp { std::vector image_rows_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_ALBUM_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.cc old mode 100755 new mode 100644 index b18b325f..7ca58a60 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.cc @@ -1,50 +1,50 @@ -/** - * Copyright 2021-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.h" - -#include - -namespace mindspore { -namespace dataset { -AmazonReviewOp::AmazonReviewOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, - int32_t op_connector_size, bool shuffle_files, int32_t num_devices, int32_t device_id, - char field_delim, const std::vector> &column_default, - const std::vector &column_name, - const std::vector &amazon_review_files_list) - : CsvOp(amazon_review_files_list, field_delim, column_default, column_name, num_workers, num_samples, - worker_connector_size, op_connector_size, shuffle_files, num_devices, device_id) {} - -void AmazonReviewOp::Print(std::ostream &out, bool show_all) const { - if (!show_all) { - // Call the super class for displaying any common 1-liner info. - ParallelOp::Print(out, show_all); - // Then show any custom derived-internal 1-liner info for this op. - out << "\n"; - } else { - // Call the super class for displaying any common detailed info. - ParallelOp::Print(out, show_all); - // Then show any custom derived-internal stuff. - out << "\nSample count: " << total_rows_ << "\nDevice id: " << device_id_ << "\nNumber of devices: " << num_devices_ - << "\nShuffle files: " << ((shuffle_files_) ? "yes" : "no") << "\nAmazonReview files list:\n"; - for (int i = 0; i < csv_files_list_.size(); ++i) { - out << " " << csv_files_list_[i]; - } - out << "\n\n"; - } -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.h" + +#include + +namespace mindspore::lite { +namespace dataset { +AmazonReviewOp::AmazonReviewOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, + int32_t op_connector_size, bool shuffle_files, int32_t num_devices, int32_t device_id, + char field_delim, const std::vector> &column_default, + const std::vector &column_name, + const std::vector &amazon_review_files_list) + : CsvOp(amazon_review_files_list, field_delim, column_default, column_name, num_workers, num_samples, + worker_connector_size, op_connector_size, shuffle_files, num_devices, device_id) {} + +void AmazonReviewOp::Print(std::ostream &out, bool show_all) const { + if (!show_all) { + // Call the super class for displaying any common 1-liner info. + ParallelOp::Print(out, show_all); + // Then show any custom derived-internal 1-liner info for this op. + out << "\n"; + } else { + // Call the super class for displaying any common detailed info. + ParallelOp::Print(out, show_all); + // Then show any custom derived-internal stuff. + out << "\nSample count: " << total_rows_ << "\nDevice id: " << device_id_ << "\nNumber of devices: " << num_devices_ + << "\nShuffle files: " << ((shuffle_files_) ? 
"yes" : "no") << "\nAmazonReview files list:\n"; + for (int i = 0; i < csv_files_list_.size(); ++i) { + out << " " << csv_files_list_[i]; + } + out << "\n\n"; + } +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.h old mode 100755 new mode 100644 index 10f3a684..9f5da3bd --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.h @@ -1,71 +1,71 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_AMAZON_REVIEW_OP_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_AMAZON_REVIEW_OP_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" - -namespace mindspore { -namespace dataset { -class JaggedConnector; - -/// \class AmazonReviewOp -/// \brief A Op derived class to represent AmazonReview Op. -class AmazonReviewOp : public CsvOp { - public: - /// \brief Constructor of AmazonReviewOp. - /// \param[in] num_workers Number of worker threads reading data from amazon_review files. - /// \param[in] num_samples The number of samples to be included in the dataset. - /// \param[in] worker_connector_size Size of each internal queue. - /// \param[in] op_connector_size Size of each queue in the connector that the child operator pulls from. - /// \param[in] shuffle_files Whether or not to shuffle the files before reading data. - /// \param[in] num_devices Number of devices that the dataset should be divided into. - /// \param[in] device_id The device ID within num_devices. - /// \param[in] field_delim A char that indicates the delimiter to separate fields. - /// \param[in] column_default List of default values for the CSV field. Each item in the list is - /// either a valid type (float, int, or string). - /// \param[in] column_name List of column names of the dataset. - /// \param[in] amazon_review_files_list List of file paths for the dataset files. - AmazonReviewOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, int32_t op_connector_size, - bool shuffle_files, int32_t num_devices, int32_t device_id, char field_delim, - const std::vector> &column_default, - const std::vector &column_name, const std::vector &amazon_review_files_list); - - /// \brief Destructor. - ~AmazonReviewOp() = default; - - /// \brief A print method typically used for debugging. - /// \param[out] out The output stream to write output to. - /// \param[in] show_all A bool to control if you want to show all info or just a summary. - void Print(std::ostream &out, bool show_all) const override; - - /// \brief DatasetName name getter. - /// \param[in] upper A bool to control if you want to return uppercase or lowercase Op name. 
- /// \return DatasetName of the current Op. - std::string DatasetName(bool upper = false) const { return upper ? "AmazonReview" : "amazon review"; } - - /// \brief Op name getter. - /// \return Name of the current Op. - std::string Name() const override { return "AmazonReviewOp"; } -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_AMAZON_REVIEW_OP_H_ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_AMAZON_REVIEW_OP_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_AMAZON_REVIEW_OP_H_ + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" + +namespace mindspore::lite { +namespace dataset { +class JaggedConnector; + +/// \class AmazonReviewOp +/// \brief A Op derived class to represent AmazonReview Op. +class AmazonReviewOp : public CsvOp { + public: + /// \brief Constructor of AmazonReviewOp. + /// \param[in] num_workers Number of worker threads reading data from amazon_review files. + /// \param[in] num_samples The number of samples to be included in the dataset. + /// \param[in] worker_connector_size Size of each internal queue. + /// \param[in] op_connector_size Size of each queue in the connector that the child operator pulls from. + /// \param[in] shuffle_files Whether or not to shuffle the files before reading data. + /// \param[in] num_devices Number of devices that the dataset should be divided into. + /// \param[in] device_id The device ID within num_devices. + /// \param[in] field_delim A char that indicates the delimiter to separate fields. + /// \param[in] column_default List of default values for the CSV field. Each item in the list is + /// either a valid type (float, int, or string). + /// \param[in] column_name List of column names of the dataset. + /// \param[in] amazon_review_files_list List of file paths for the dataset files. + AmazonReviewOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, int32_t op_connector_size, + bool shuffle_files, int32_t num_devices, int32_t device_id, char field_delim, + const std::vector> &column_default, + const std::vector &column_name, const std::vector &amazon_review_files_list); + + /// \brief Destructor. + ~AmazonReviewOp() = default; + + /// \brief A print method typically used for debugging. + /// \param[out] out The output stream to write output to. + /// \param[in] show_all A bool to control if you want to show all info or just a summary. + void Print(std::ostream &out, bool show_all) const override; + + /// \brief DatasetName name getter. + /// \param[in] upper A bool to control if you want to return uppercase or lowercase Op name. + /// \return DatasetName of the current Op. + std::string DatasetName(bool upper = false) const { return upper ? "AmazonReview" : "amazon review"; } + + /// \brief Op name getter. 
+ /// \return Name of the current Op. + std::string Name() const override { return "AmazonReviewOp"; } +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_AMAZON_REVIEW_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/caltech_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/caltech_op.cc old mode 100755 new mode 100644 index 92223d4c..f9832203 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/caltech_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/caltech_op.cc @@ -1,32 +1,32 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - - * http://www.apache.org/licenses/LICENSE-2.0 - - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. -*/ -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/caltech_op.h" - -#include -#include -#include -#include - -namespace mindspore { -namespace dataset { -const std::set kExts = {".jpg", ".JPEG"}; -const std::map kClassIndex = {}; -CaltechOp::CaltechOp(int32_t num_workers, const std::string &file_dir, int32_t queue_size, bool do_decode, - std::unique_ptr data_schema, std::shared_ptr sampler) - : ImageFolderOp(num_workers, file_dir, queue_size, false, do_decode, kExts, kClassIndex, std::move(data_schema), - std::move(sampler)) {} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + + * http://www.apache.org/licenses/LICENSE-2.0 + + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+*/ +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/caltech_op.h" + +#include +#include +#include +#include + +namespace mindspore::lite { +namespace dataset { +const std::set kExts = {".jpg", ".JPEG"}; +const std::map kClassIndex = {}; +CaltechOp::CaltechOp(int32_t num_workers, const std::string &file_dir, int32_t queue_size, bool do_decode, + std::unique_ptr data_schema, std::shared_ptr sampler) + : ImageFolderOp(num_workers, file_dir, queue_size, false, do_decode, kExts, kClassIndex, std::move(data_schema), + std::move(sampler)) {} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/caltech_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/caltech_op.h old mode 100755 new mode 100644 index 6970b761..685409df --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/caltech_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/caltech_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/image_folder_op.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Read Caltech256 Dataset. class CaltechOp : public ImageFolderOp { @@ -53,5 +53,5 @@ class CaltechOp : public ImageFolderOp { std::string DatasetName(bool upper = false) const { return upper ? "Caltech" : "caltech"; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_CALTECH_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/celeba_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/celeba_op.cc index c7161d3f..2833858b 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/celeba_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/celeba_op.cc @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { #ifdef ENABLE_PYTHON CelebAOp::CelebAOp(int32_t num_workers, const std::string &dir, int32_t queue_size, bool decode, @@ -334,4 +334,4 @@ Status CelebAOp::InitPullMode() { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/celeba_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/celeba_op.h index 829568c2..ba671d1f 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/celeba_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/celeba_op.h @@ -40,7 +40,7 @@ } \ } while (false) -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CelebAOp : public MappableLeafOp { public: @@ -139,5 +139,5 @@ class CelebAOp : public MappableLeafOp { #endif }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_CELEBA_OP_H diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/cifar_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/cifar_op.cc index 02e4617e..b0fd19e0 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/cifar_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/cifar_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" 
#include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr uint32_t kCifarImageHeight = 32; @@ -402,4 +402,4 @@ Status CifarOp::InitPullMode() { return PrepareData(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/cifar_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/cifar_op.h index 13b1bcf5..1a1b8183 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/cifar_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/cifar_op.h @@ -34,7 +34,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CifarOp : public MappableLeafOp { public: @@ -124,5 +124,5 @@ class CifarOp : public MappableLeafOp { std::vector, std::vector>> cifar_image_label_pairs_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif /// MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_CIFAR_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/cityscapes_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/cityscapes_op.cc index 37c083fe..3098e8c2 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/cityscapes_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/cityscapes_op.cc @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr char taskSuffix[] = "polygon"; @@ -272,4 +272,4 @@ Status CityscapesOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/cityscapes_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/cityscapes_op.h index 5af37d71..f7988379 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/cityscapes_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/cityscapes_op.h @@ -34,7 +34,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CityscapesOp : public MappableLeafOp { public: @@ -124,5 +124,5 @@ class CityscapesOp : public MappableLeafOp { std::vector> image_task_pairs_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_CITYSCAPES_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/clue_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/clue_op.cc index 0d78fef5..f43ea542 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/clue_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/clue_op.cc @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { ClueOp::ClueOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, ColKeyMap cols_to_keyword, std::vector clue_files_list, int32_t op_connector_size, bool shuffle_files, @@ -290,4 +290,4 @@ 
Status ClueOp::ComputeColMap() { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/clue_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/clue_op.h index 4bedf3f4..ad4a18b9 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/clue_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/clue_op.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.h" #include "mindspore-lite/minddata/dataset/engine/jagged_connector.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { using StringIndex = AutoIndexObj; using ColKeyMap = std::map>; @@ -103,5 +103,5 @@ class ClueOp : public NonMappableLeafOp { ColKeyMap cols_to_keyword_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_CLUE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/cmu_arctic_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/cmu_arctic_op.cc index 71ab0e23..afa860f9 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/cmu_arctic_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/cmu_arctic_op.cc @@ -1,171 +1,171 @@ -/** - * Copyright 2022-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/cmu_arctic_op.h" - -#include - -#include "mindspore-lite/minddata/dataset/audio/kernels/audio_utils.h" -#include "mindspore-lite/minddata/dataset/core/config_manager.h" -#include "mindspore-lite/minddata/dataset/core/tensor_shape.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" -#include "mindspore-lite/minddata/dataset/engine/execution_tree.h" -#include "utils/file_utils.h" - -namespace mindspore { -namespace dataset { -const char kDataDirectory[] = "wav"; -const char kLabelDirectory[] = "etc"; -const char kLabelFileName[] = "txt.done.data"; -const char kDataFilePrefix[] = "cmu_us_"; -const char kDataFileSuffix[] = "_arctic"; - -CMUArcticOp::CMUArcticOp(const std::string &dataset_dir, const std::string &name, int32_t num_workers, - int32_t queue_size, std::unique_ptr data_schema, - std::shared_ptr sampler) - : MappableLeafOp(num_workers, queue_size, std::move(sampler)), - folder_path_(dataset_dir), - name_(name), - data_schema_(std::move(data_schema)) {} - -Status CMUArcticOp::LoadTensorRow(row_id_type row_id, TensorRow *trow) { - RETURN_UNEXPECTED_IF_NULL(trow); - const uint32_t sample_rate = 16000; - const std::string wav_suffix = ".wav"; - size_t pos = label_pairs_[row_id].first.find_last_of('_'); - CHECK_FAIL_RETURN_UNEXPECTED( - pos != std::string::npos && pos + 1 < label_pairs_[row_id].first.size(), - "Invalid utterance id, please check if it is in valid format: " + label_pairs_[row_id].first); - std::string utterance_id_t = label_pairs_[row_id].first.substr(pos + 1); - std::string full_name_path = kDataFilePrefix + name_ + kDataFileSuffix; - std::string file_name = label_pairs_[row_id].first + wav_suffix; - Path root_folder(real_path_); - Path wav_file_path = root_folder / full_name_path / kDataDirectory / file_name; - std::shared_ptr waveform, rate, transcript, utterance_id; - RETURN_IF_NOT_OK(ReadAudio(wav_file_path.ToString(), &waveform)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(sample_rate, &rate)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(label_pairs_[row_id].second, &transcript)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(utterance_id_t, &utterance_id)); - (*trow) = TensorRow(row_id, {std::move(waveform), std::move(rate), std::move(transcript), std::move(utterance_id)}); - Path label_dir = root_folder / full_name_path / kLabelDirectory / kLabelFileName; - trow->setPath({wav_file_path.ToString(), wav_file_path.ToString(), label_dir.ToString(), label_dir.ToString()}); - return Status::OK(); -} - -void CMUArcticOp::Print(std::ostream &out, bool show_all) const { - if (!show_all) { - ParallelOp::Print(out, show_all); - out << "\n"; - } else { - ParallelOp::Print(out, show_all); - out << "\nNumber of rows: " << num_rows_ << "\nCMUArctic directory: " << folder_path_ << "\n\n"; - } -} - -Status CMUArcticOp::CountTotalRows(const std::string &dir, const std::string &name, int64_t *count) { - RETURN_UNEXPECTED_IF_NULL(count); - *count = 0; - const int64_t num_samples = 0; - const int64_t start_index = 0; - auto sampler = std::make_shared(start_index, num_samples); - auto schema = std::make_unique(); - - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT32), TensorImpl::kCv, 1))); - TensorShape scalar_rate = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("sample_rate", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_rate))); - TensorShape scalar_utterance = 
TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("transcript", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_utterance))); - TensorShape scalar_utterance_id = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("utterance_id", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_utterance_id))); - std::shared_ptr cfg = GlobalContext::config_manager(); - - int32_t num_workers = cfg->num_parallel_workers(); - int32_t op_connect_size = cfg->op_connector_size(); - auto op = - std::make_shared(dir, name, num_workers, op_connect_size, std::move(schema), std::move(sampler)); - RETURN_IF_NOT_OK(op->PrepareData()); - *count = op->label_pairs_.size(); - return Status::OK(); -} - -Status CMUArcticOp::ComputeColMap() { - if (column_name_id_map_.empty()) { - for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) { - column_name_id_map_[data_schema_->Column(i).Name()] = i; - } - } else { - MS_LOG(WARNING) << "Column name map is already set!"; - } - return Status::OK(); -} - -Status CMUArcticOp::ReadAudio(const std::string &audio_dir, std::shared_ptr *waveform) { - RETURN_UNEXPECTED_IF_NULL(waveform); - const int32_t kWavFileSampleRate = 16000; - int32_t sample_rate = 0; - std::vector waveform_vec; - RETURN_IF_NOT_OK(ReadWaveFile(audio_dir, &waveform_vec, &sample_rate)); - CHECK_FAIL_RETURN_UNEXPECTED( - sample_rate == kWavFileSampleRate, - "Invalid file, sampling rate of CMUArctic wav file must be 16000, file path: " + audio_dir); - RETURN_IF_NOT_OK(Tensor::CreateFromVector(waveform_vec, waveform)); - RETURN_IF_NOT_OK((*waveform)->ExpandDim(0)); - return Status::OK(); -} - -Status CMUArcticOp::PrepareData() { - auto realpath = FileUtils::GetRealPath(folder_path_.c_str()); - if (!realpath.has_value()) { - MS_LOG(ERROR) << "Invalid file path, CMUArctic Dataset dir: " << folder_path_ << " does not exist."; - RETURN_STATUS_UNEXPECTED("Invalid file path, CMUArctic Dataset dir: " + folder_path_ + " does not exist."); - } - real_path_ = realpath.value(); - Path dir(real_path_); - std::string full_name_path = kDataFilePrefix + name_ + kDataFileSuffix; - Path label_dir = dir / full_name_path / kLabelDirectory / kLabelFileName; - CHECK_FAIL_RETURN_UNEXPECTED(label_dir.Exists() && !label_dir.IsDirectory(), - "Invalid file, failed to find label file: " + label_dir.ToString()); - std::ifstream label_reader(label_dir.ToString(), std::ifstream::in); - CHECK_FAIL_RETURN_UNEXPECTED(label_reader.is_open(), - "Invalid file, failed to open label file: " + label_dir.ToString() + - ", make sure file not damaged or permission denied."); - std::string line = ""; - while (getline(label_reader, line)) { - size_t quot_inx[2] = {0}; - size_t quot_num = 0; - size_t quot_exact = 2; - for (size_t i = 0; quot_num < quot_exact && i < line.size(); i++) { - if (line[i] == '"') { - quot_inx[quot_num++] = i; - } - } - if (quot_num != quot_exact) { - label_reader.close(); - RETURN_STATUS_UNEXPECTED("Invalid file, the file may not be a CMUArctic dataset file: " + label_dir.ToString()); - } - label_pairs_.push_back( - {line.substr(2, quot_inx[0] - 3), line.substr(quot_inx[0] + 1, quot_inx[1] - quot_inx[0] - 1)}); - } - label_reader.close(); - num_rows_ = label_pairs_.size(); - CHECK_FAIL_RETURN_UNEXPECTED(num_rows_ > 0, "Invalid data, no valid data found in path: " + folder_path_); - return Status::OK(); -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2022-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/cmu_arctic_op.h" + +#include + +#include "mindspore-lite/minddata/dataset/audio/kernels/audio_utils.h" +#include "mindspore-lite/minddata/dataset/core/config_manager.h" +#include "mindspore-lite/minddata/dataset/core/tensor_shape.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" +#include "mindspore-lite/minddata/dataset/engine/execution_tree.h" +#include "utils/file_utils.h" + +namespace mindspore::lite { +namespace dataset { +const char kDataDirectory[] = "wav"; +const char kLabelDirectory[] = "etc"; +const char kLabelFileName[] = "txt.done.data"; +const char kDataFilePrefix[] = "cmu_us_"; +const char kDataFileSuffix[] = "_arctic"; + +CMUArcticOp::CMUArcticOp(const std::string &dataset_dir, const std::string &name, int32_t num_workers, + int32_t queue_size, std::unique_ptr<DataSchema> data_schema, + std::shared_ptr<SamplerRT> sampler) + : MappableLeafOp(num_workers, queue_size, std::move(sampler)), + folder_path_(dataset_dir), + name_(name), + data_schema_(std::move(data_schema)) {} + +Status CMUArcticOp::LoadTensorRow(row_id_type row_id, TensorRow *trow) { + RETURN_UNEXPECTED_IF_NULL(trow); + const uint32_t sample_rate = 16000; + const std::string wav_suffix = ".wav"; + size_t pos = label_pairs_[row_id].first.find_last_of('_'); + CHECK_FAIL_RETURN_UNEXPECTED( + pos != std::string::npos && pos + 1 < label_pairs_[row_id].first.size(), + "Invalid utterance id, please check if it is in valid format: " + label_pairs_[row_id].first); + std::string utterance_id_t = label_pairs_[row_id].first.substr(pos + 1); + std::string full_name_path = kDataFilePrefix + name_ + kDataFileSuffix; + std::string file_name = label_pairs_[row_id].first + wav_suffix; + Path root_folder(real_path_); + Path wav_file_path = root_folder / full_name_path / kDataDirectory / file_name; + std::shared_ptr<Tensor> waveform, rate, transcript, utterance_id; + RETURN_IF_NOT_OK(ReadAudio(wav_file_path.ToString(), &waveform)); + RETURN_IF_NOT_OK(Tensor::CreateScalar(sample_rate, &rate)); + RETURN_IF_NOT_OK(Tensor::CreateScalar(label_pairs_[row_id].second, &transcript)); + RETURN_IF_NOT_OK(Tensor::CreateScalar(utterance_id_t, &utterance_id)); + (*trow) = TensorRow(row_id, {std::move(waveform), std::move(rate), std::move(transcript), std::move(utterance_id)}); + Path label_dir = root_folder / full_name_path / kLabelDirectory / kLabelFileName; + trow->setPath({wav_file_path.ToString(), wav_file_path.ToString(), label_dir.ToString(), label_dir.ToString()}); + return Status::OK(); +} + +void CMUArcticOp::Print(std::ostream &out, bool show_all) const { + if (!show_all) { + ParallelOp::Print(out, show_all); + out << "\n"; + } else { + ParallelOp::Print(out, show_all); + out << "\nNumber of rows: " << num_rows_ << "\nCMUArctic directory: " << folder_path_ << "\n\n"; + } +} + +Status CMUArcticOp::CountTotalRows(const std::string &dir, const std::string &name, int64_t *count) { + RETURN_UNEXPECTED_IF_NULL(count); + *count = 
0; + const int64_t num_samples = 0; + const int64_t start_index = 0; + auto sampler = std::make_shared<SequentialSamplerRT>(start_index, num_samples); + auto schema = std::make_unique<DataSchema>(); + + RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT32), TensorImpl::kCv, 1))); + TensorShape scalar_rate = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("sample_rate", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_rate))); + TensorShape scalar_utterance = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("transcript", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_utterance))); + TensorShape scalar_utterance_id = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("utterance_id", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_utterance_id))); + std::shared_ptr<ConfigManager> cfg = GlobalContext::config_manager(); + + int32_t num_workers = cfg->num_parallel_workers(); + int32_t op_connect_size = cfg->op_connector_size(); + auto op = + std::make_shared<CMUArcticOp>(dir, name, num_workers, op_connect_size, std::move(schema), std::move(sampler)); + RETURN_IF_NOT_OK(op->PrepareData()); + *count = op->label_pairs_.size(); + return Status::OK(); +} + +Status CMUArcticOp::ComputeColMap() { + if (column_name_id_map_.empty()) { + for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) { + column_name_id_map_[data_schema_->Column(i).Name()] = i; + } + } else { + MS_LOG(WARNING) << "Column name map is already set!"; + } + return Status::OK(); +} + +Status CMUArcticOp::ReadAudio(const std::string &audio_dir, std::shared_ptr<Tensor> *waveform) { + RETURN_UNEXPECTED_IF_NULL(waveform); + const int32_t kWavFileSampleRate = 16000; + int32_t sample_rate = 0; + std::vector<float> waveform_vec; + RETURN_IF_NOT_OK(ReadWaveFile(audio_dir, &waveform_vec, &sample_rate)); + CHECK_FAIL_RETURN_UNEXPECTED( + sample_rate == kWavFileSampleRate, + "Invalid file, sampling rate of CMUArctic wav file must be 16000, file path: " + audio_dir); + RETURN_IF_NOT_OK(Tensor::CreateFromVector(waveform_vec, waveform)); + RETURN_IF_NOT_OK((*waveform)->ExpandDim(0)); + return Status::OK(); +} + +Status CMUArcticOp::PrepareData() { + auto realpath = FileUtils::GetRealPath(folder_path_.c_str()); + if (!realpath.has_value()) { + MS_LOG(ERROR) << "Invalid file path, CMUArctic Dataset dir: " << folder_path_ << " does not exist."; + RETURN_STATUS_UNEXPECTED("Invalid file path, CMUArctic Dataset dir: " + folder_path_ + " does not exist."); + } + real_path_ = realpath.value(); + Path dir(real_path_); + std::string full_name_path = kDataFilePrefix + name_ + kDataFileSuffix; + Path label_dir = dir / full_name_path / kLabelDirectory / kLabelFileName; + CHECK_FAIL_RETURN_UNEXPECTED(label_dir.Exists() && !label_dir.IsDirectory(), + "Invalid file, failed to find label file: " + label_dir.ToString()); + std::ifstream label_reader(label_dir.ToString(), std::ifstream::in); + CHECK_FAIL_RETURN_UNEXPECTED(label_reader.is_open(), + "Invalid file, failed to open label file: " + label_dir.ToString() + + ", make sure file not damaged or permission denied."); + std::string line = ""; + while (getline(label_reader, line)) { + size_t quot_inx[2] = {0}; + size_t quot_num = 0; + size_t quot_exact = 2; + for (size_t i = 0; quot_num < quot_exact && i < line.size(); i++) { + if (line[i] == '"') { + quot_inx[quot_num++] = i; + } + } + if (quot_num != quot_exact) { + label_reader.close(); + RETURN_STATUS_UNEXPECTED("Invalid file, the file may not be a 
CMUArctic dataset file: " + label_dir.ToString()); + } + label_pairs_.push_back( + {line.substr(2, quot_inx[0] - 3), line.substr(quot_inx[0] + 1, quot_inx[1] - quot_inx[0] - 1)}); + } + label_reader.close(); + num_rows_ = label_pairs_.size(); + CHECK_FAIL_RETURN_UNEXPECTED(num_rows_ > 0, "Invalid data, no valid data found in path: " + folder_path_); + return Status::OK(); +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/cmu_arctic_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/cmu_arctic_op.h index 7def4366..6126ff22 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/cmu_arctic_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/cmu_arctic_op.h @@ -1,99 +1,99 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_CMU_ARCTIC_OP_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_CMU_ARCTIC_OP_H_ - -#include -#include -#include -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/core/tensor.h" -#include "mindspore-lite/minddata/dataset/engine/data_schema.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -#include "mindspore-lite/minddata/dataset/util/path.h" -#include "mindspore-lite/minddata/dataset/util/queue.h" -#include "mindspore-lite/minddata/dataset/util/status.h" -#include "mindspore-lite/minddata/dataset/util/wait_post.h" - -namespace mindspore { -namespace dataset { -class CMUArcticOp : public MappableLeafOp { - public: - /// \brief Constructor. - /// \param[in] dataset_dir Directory of CMUArctic. - /// \param[in] name Part of this dataset, can be "aew", "ahw", "aup", "awb", "axb", "bdl", - /// "clb", "eey", "fem", "gka", "jmk", "ksp", "ljm", "lnh", "rms", "rxr", "slp" or "slt" - /// \param[in] num_workers Number of workers reading audios in parallel. - /// \param[in] queue_size Connector queue size. - /// \param[in] data_schema The schema of the CMUArctic dataset. - /// \param[in] sampler Sampler tells CMUArcticOp what to read. - CMUArcticOp(const std::string &dataset_dir, const std::string &name, int32_t num_workers, int32_t queue_size, - std::unique_ptr data_schema, std::shared_ptr sampler); - - /// \brief Destructor. - ~CMUArcticOp() = default; - - /// \brief A print method typically used for debugging. - /// \param[out] out The output stream to write output to. - /// \param[in] show_all A bool to control if you want to show all info or just a summary. - void Print(std::ostream &out, bool show_all) const override; - - /// \brief Function to count the number of samples in the CMUArctic dataset. - /// \param[in] dir Path to the CMUArctic directory. 
- /// \param[in] name Choose the subset of CMUArctic dataset. - /// \param[out] count Output arg that will hold the minimum of the actual dataset size and numSamples. - /// \return Status The status code returned. - static Status CountTotalRows(const std::string &dir, const std::string &name, int64_t *count); - - /// \brief Op name getter. - /// \return Name of the current Op. - std::string Name() const override { return "CMUArcticOp"; } - - private: - /// \brief Load a tensor row according to a pair. - /// \param[in] row_id Id for this tensor row. - /// \param[out] row Audio & label read into this tensor row. - /// \return Status The status code returned. - Status LoadTensorRow(row_id_type row_id, TensorRow *row) override; - - /// \brief Parse a single wav file. - /// \param[in] audio_dir Audio file path. - /// \param[out] waveform The output waveform tensor. - /// \return Status The status code returned. - Status ReadAudio(const std::string &audio_dir, std::shared_ptr *waveform); - - /// \brief Prepare all data in the directory. - /// \return Status The status code returned. - Status PrepareData(); - - /// \brief Private function for computing the assignment of the column name map. - /// \return Status. - Status ComputeColMap() override; - - const std::string name_; - std::string folder_path_; - std::string real_path_; - std::unique_ptr data_schema_; - std::vector> label_pairs_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_CMU_ARCTIC_OP_H_ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_CMU_ARCTIC_OP_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_CMU_ARCTIC_OP_H_ + +#include +#include +#include +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/core/tensor.h" +#include "mindspore-lite/minddata/dataset/engine/data_schema.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" +#include "mindspore-lite/minddata/dataset/util/path.h" +#include "mindspore-lite/minddata/dataset/util/queue.h" +#include "mindspore-lite/minddata/dataset/util/status.h" +#include "mindspore-lite/minddata/dataset/util/wait_post.h" + +namespace mindspore::lite { +namespace dataset { +class CMUArcticOp : public MappableLeafOp { + public: + /// \brief Constructor. + /// \param[in] dataset_dir Directory of CMUArctic. + /// \param[in] name Part of this dataset, can be "aew", "ahw", "aup", "awb", "axb", "bdl", + /// "clb", "eey", "fem", "gka", "jmk", "ksp", "ljm", "lnh", "rms", "rxr", "slp" or "slt" + /// \param[in] num_workers Number of workers reading audios in parallel. + /// \param[in] queue_size Connector queue size. 
+ /// \param[in] data_schema The schema of the CMUArctic dataset. + /// \param[in] sampler Sampler tells CMUArcticOp what to read. + CMUArcticOp(const std::string &dataset_dir, const std::string &name, int32_t num_workers, int32_t queue_size, + std::unique_ptr<DataSchema> data_schema, std::shared_ptr<SamplerRT> sampler); + + /// \brief Destructor. + ~CMUArcticOp() = default; + + /// \brief A print method typically used for debugging. + /// \param[out] out The output stream to write output to. + /// \param[in] show_all A bool to control if you want to show all info or just a summary. + void Print(std::ostream &out, bool show_all) const override; + + /// \brief Function to count the number of samples in the CMUArctic dataset. + /// \param[in] dir Path to the CMUArctic directory. + /// \param[in] name Choose the subset of CMUArctic dataset. + /// \param[out] count Output arg that will hold the minimum of the actual dataset size and numSamples. + /// \return Status The status code returned. + static Status CountTotalRows(const std::string &dir, const std::string &name, int64_t *count); + + /// \brief Op name getter. + /// \return Name of the current Op. + std::string Name() const override { return "CMUArcticOp"; } + + private: + /// \brief Load a tensor row according to a pair. + /// \param[in] row_id Id for this tensor row. + /// \param[out] row Audio & label read into this tensor row. + /// \return Status The status code returned. + Status LoadTensorRow(row_id_type row_id, TensorRow *row) override; + + /// \brief Parse a single wav file. + /// \param[in] audio_dir Audio file path. + /// \param[out] waveform The output waveform tensor. + /// \return Status The status code returned. + Status ReadAudio(const std::string &audio_dir, std::shared_ptr<Tensor> *waveform); + + /// \brief Prepare all data in the directory. + /// \return Status The status code returned. + Status PrepareData(); + + /// \brief Private function for computing the assignment of the column name map. + /// \return Status. 
+ Status ComputeColMap() override; + + const std::string name_; + std::string folder_path_; + std::string real_path_; + std::unique_ptr<DataSchema> data_schema_; + std::vector<std::pair<std::string, std::string>> label_pairs_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_CMU_ARCTIC_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/coco_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/coco_op.cc index 2ab13b0e..a16d1faa 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/coco_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/coco_op.cc @@ -23,7 +23,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const char kJsonImages[] = "images"; const char kJsonImagesFileName[] = "file_name"; @@ -633,4 +633,4 @@ Status CocoOp::GetClassIndexing(std::vector @@ -320,5 +320,5 @@ class CocoOp : public MappableLeafOp { #endif }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_COCO_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/conll2000_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/conll2000_op.cc index 46db1639..e20996c5 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/conll2000_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/conll2000_op.cc @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/util/wait_post.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { CoNLL2000Op::CoNLL2000Op(int32_t num_workers, int64_t total_rows, int32_t worker_connector_size, std::unique_ptr<DataSchema> schema, const std::vector<std::string> &conll2000_file_list, @@ -174,4 +174,4 @@ Status CoNLL2000Op::LoadFile(const std::string &file, int64_t start_offset, int6 return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/conll2000_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/conll2000_op.h index 42b72c1f..cfad6efe 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/conll2000_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/conll2000_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.h" #include "mindspore-lite/minddata/dataset/util/queue.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class JaggedConnector; @@ -92,5 +92,5 @@ class CoNLL2000Op : public TextFileOp { Status LoadFile(const std::string &file, int64_t start_offset, int64_t end_offset, int32_t worker_id) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_CONLL2000_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.cc index aa53aa8c..8838feef 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.cc @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset 
{ CsvOp::CsvOp(const std::vector &csv_files_list, char field_delim, @@ -818,4 +818,4 @@ bool CsvOp::ColumnNameValidate() { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h index 783968b9..1b299c59 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.h" #include "mindspore-lite/minddata/dataset/engine/jagged_connector.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const size_t CSV_BUFFER_SIZE = 4096; @@ -242,5 +242,5 @@ class CsvOp : public NonMappableLeafOp { bool check_flag_ = false; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // DATASET_ENGINE_DATASETOPS_SOURCE_CSV_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/dbpedia_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/dbpedia_op.cc index 73e83d44..9cf42f93 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/dbpedia_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/dbpedia_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/jagged_connector.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { DBpediaOp::DBpediaOp(const std::vector &dataset_files_list, char field_delim, const std::vector> &column_default, @@ -50,4 +50,4 @@ void DBpediaOp::Print(std::ostream &out, bool show_all) const { } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/dbpedia_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/dbpedia_op.h index 4b6a1590..9379bf99 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/dbpedia_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/dbpedia_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DBpediaOp : public CsvOp { public: @@ -66,5 +66,5 @@ class DBpediaOp : public CsvOp { std::string Name() const override { return "DBpediaOp"; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_DBPEDIA_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/div2k_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/div2k_op.cc index 6c1f47e4..19aabf9e 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/div2k_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/div2k_op.cc @@ -27,7 +27,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const std::map DatasetPramMap = {{"train_hr", "DIV2K_train_HR"}, {"valid_hr", "DIV2K_valid_HR"}, @@ -268,4 +268,4 @@ Status DIV2KOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/div2k_op.h 
b/mindspore-lite/minddata/dataset/engine/datasetops/source/div2k_op.h index d4850ac9..dc3a8d81 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/div2k_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/div2k_op.h @@ -34,7 +34,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DIV2KOp : public MappableLeafOp { public: @@ -118,5 +118,5 @@ class DIV2KOp : public MappableLeafOp { std::string lr_dir_real_name_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_DIV2K_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/emnist_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/emnist_op.cc index 49f2b9d3..a0b2a013 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/emnist_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/emnist_op.cc @@ -25,7 +25,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { EMnistOp::EMnistOp(const std::string &name, const std::string &usage, int32_t num_workers, const std::string &folder_path, int32_t queue_size, std::unique_ptr data_schema, @@ -143,4 +143,4 @@ Status EMnistOp::CountTotalRows(const std::string &dir, const std::string &name, return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/emnist_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/emnist_op.h index 559791b5..554ea17f 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/emnist_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/emnist_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declares template @@ -80,5 +80,5 @@ class EMnistOp : public MnistOp { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_EMNIST_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/en_wik9_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/en_wik9_op.cc index bdf6b04c..717bd6c4 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/en_wik9_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/en_wik9_op.cc @@ -21,7 +21,7 @@ #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { EnWik9Op::EnWik9Op(int32_t num_workers, int64_t total_rows, int32_t worker_connector_size, std::unique_ptr data_schema, const std::vector &file_list, @@ -125,4 +125,4 @@ int64_t EnWik9Op::CountTotalRows(const std::string &file) { return count; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/en_wik9_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/en_wik9_op.h index 13893c89..9f206bff 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/en_wik9_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/en_wik9_op.h @@ -23,7 +23,7 @@ #include 
"mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class EnWik9Op : public TextFileOp { public: @@ -73,5 +73,5 @@ class EnWik9Op : public TextFileOp { int64_t CountTotalRows(const std::string &file) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_EN_WIK9_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/fake_image_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/fake_image_op.cc index 8c5231f1..207c577d 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/fake_image_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/fake_image_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { FakeImageOp::FakeImageOp(int32_t num_images, const std::vector &image_size, int32_t num_classes, int32_t base_seed, int32_t num_workers, int32_t op_connector_size, @@ -141,4 +141,4 @@ Status FakeImageOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/fake_image_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/fake_image_op.h index 3f266bde..44ea00b4 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/fake_image_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/fake_image_op.h @@ -34,7 +34,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class FakeImageOp : public MappableLeafOp { @@ -107,5 +107,5 @@ class FakeImageOp : public MappableLeafOp { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_FAKE_IMAGE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/fashion_mnist_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/fashion_mnist_op.cc index 3372b968..d57a14f8 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/fashion_mnist_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/fashion_mnist_op.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { FashionMnistOp::FashionMnistOp(const std::string &usage, int32_t num_workers, const std::string &folder_path, int32_t queue_size, std::unique_ptr data_schema, @@ -88,4 +88,4 @@ Status FashionMnistOp::CountTotalRows(const std::string &dir, const std::string return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/fashion_mnist_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/fashion_mnist_op.h index f6c54c40..81eaa55f 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/fashion_mnist_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/fashion_mnist_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.h" -namespace 
mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Forward declares. template @@ -63,5 +63,5 @@ class FashionMnistOp : public MnistOp { std::string DatasetName(bool upper = false) const override { return upper ? "FashionMnist" : "fashion mnist"; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_FASHION_MNIST_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/flickr_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/flickr_op.cc index 1fb71933..88b5ac8c 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/flickr_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/flickr_op.cc @@ -27,7 +27,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { FlickrOp::FlickrOp(int32_t num_workers, const std::string &dataset_dir, const std::string &file_path, bool decode, int32_t queue_size, std::unique_ptr data_schema, std::shared_ptr sampler) @@ -236,4 +236,4 @@ Status FlickrOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/flickr_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/flickr_op.h index 52cf903f..152432e4 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/flickr_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/flickr_op.h @@ -34,7 +34,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class FlickrOp : public MappableLeafOp { public: @@ -100,5 +100,5 @@ class FlickrOp : public MappableLeafOp { std::vector>> image_annotation_pairs_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_FLICKR_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/food101_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/food101_op.cc index 07eaac11..e6e31093 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/food101_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/food101_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/path.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Food101Op::Food101Op(const std::string &folder_path, const std::string &usage, int32_t num_workers, int32_t queue_size, bool decode, std::unique_ptr data_schema, std::shared_ptr sampler) @@ -161,4 +161,4 @@ Status Food101Op::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/food101_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/food101_op.h index c7d9581d..128b20f7 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/food101_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/food101_op.h @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/util/queue.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class Food101Op 
: public MappableLeafOp { public: @@ -97,5 +97,5 @@ class Food101Op : public MappableLeafOp { std::map> annotation_map_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_FOOD101_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/generator_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/generator_op.cc index 8eada875..eb581b63 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/generator_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/generator_op.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "mindspore-lite/minddata/dataset/util/task_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { GeneratorOp::GeneratorOp(const py::function &generator_function, std::vector column_names, std::vector column_types, int32_t prefetch_size, int32_t connector_size, @@ -549,4 +549,4 @@ Status GeneratorOp::GetNextRowPullMode(TensorRow *const row) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/generator_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/generator_op.h index eeb90163..31da965a 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/generator_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/generator_op.h @@ -35,7 +35,7 @@ namespace py = pybind11; -namespace mindspore { +namespace mindspore::lite { namespace dataset { #ifndef _MSC_VER #pragma GCC visibility push(hidden) @@ -180,6 +180,6 @@ class GeneratorOp : public PipelineOp, public RandomAccessOp { #pragma GCC visibility pop #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_GENERATOR_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/gtzan_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/gtzan_op.cc index 4d107f98..16095a63 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/gtzan_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/gtzan_op.cc @@ -1,335 +1,335 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/gtzan_op.h" - -#include -#include - -#include "mindspore-lite/minddata/dataset/audio/kernels/audio_utils.h" -#include "mindspore-lite/minddata/dataset/core/config_manager.h" -#include "mindspore-lite/minddata/dataset/core/tensor_shape.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" -#include "mindspore-lite/minddata/dataset/engine/execution_tree.h" -#include "utils/file_utils.h" - -namespace mindspore { -namespace dataset { -const std::vector genres = { - "blues", "classical", "country", "disco", "hiphop", "jazz", "metal", "pop", "reggae", "rock", -}; - -const std::vector filtered_test = { - "blues.00012", "blues.00013", "blues.00014", "blues.00015", "blues.00016", "blues.00017", - "blues.00018", "blues.00019", "blues.00020", "blues.00021", "blues.00022", "blues.00023", - "blues.00024", "blues.00025", "blues.00026", "blues.00027", "blues.00028", "blues.00061", - "blues.00062", "blues.00063", "blues.00064", "blues.00065", "blues.00066", "blues.00067", - "blues.00068", "blues.00069", "blues.00070", "blues.00071", "blues.00072", "blues.00098", - "blues.00099", "classical.00011", "classical.00012", "classical.00013", "classical.00014", "classical.00015", - "classical.00016", "classical.00017", "classical.00018", "classical.00019", "classical.00020", "classical.00021", - "classical.00022", "classical.00023", "classical.00024", "classical.00025", "classical.00026", "classical.00027", - "classical.00028", "classical.00029", "classical.00034", "classical.00035", "classical.00036", "classical.00037", - "classical.00038", "classical.00039", "classical.00040", "classical.00041", "classical.00049", "classical.00077", - "classical.00078", "classical.00079", "country.00030", "country.00031", "country.00032", "country.00033", - "country.00034", "country.00035", "country.00036", "country.00037", "country.00038", "country.00039", - "country.00040", "country.00043", "country.00044", "country.00046", "country.00047", "country.00048", - "country.00050", "country.00051", "country.00053", "country.00054", "country.00055", "country.00056", - "country.00057", "country.00058", "country.00059", "country.00060", "country.00061", "country.00062", - "country.00063", "country.00064", "disco.00001", "disco.00021", "disco.00058", "disco.00062", - "disco.00063", "disco.00064", "disco.00065", "disco.00066", "disco.00069", "disco.00076", - "disco.00077", "disco.00078", "disco.00079", "disco.00080", "disco.00081", "disco.00082", - "disco.00083", "disco.00084", "disco.00085", "disco.00086", "disco.00087", "disco.00088", - "disco.00091", "disco.00092", "disco.00093", "disco.00094", "disco.00096", "disco.00097", - "disco.00099", "hiphop.00000", "hiphop.00026", "hiphop.00027", "hiphop.00030", "hiphop.00040", - "hiphop.00043", "hiphop.00044", "hiphop.00045", "hiphop.00051", "hiphop.00052", "hiphop.00053", - "hiphop.00054", "hiphop.00062", "hiphop.00063", "hiphop.00064", "hiphop.00065", "hiphop.00066", - "hiphop.00067", "hiphop.00068", "hiphop.00069", "hiphop.00070", "hiphop.00071", "hiphop.00072", - "hiphop.00073", "hiphop.00074", "hiphop.00075", "hiphop.00099", "jazz.00073", "jazz.00074", - "jazz.00075", "jazz.00076", "jazz.00077", "jazz.00078", "jazz.00079", "jazz.00080", - "jazz.00081", "jazz.00082", "jazz.00083", "jazz.00084", "jazz.00085", "jazz.00086", - "jazz.00087", "jazz.00088", "jazz.00089", "jazz.00090", "jazz.00091", "jazz.00092", - "jazz.00093", "jazz.00094", "jazz.00095", "jazz.00096", 
"jazz.00097", "jazz.00098", - "jazz.00099", "metal.00012", "metal.00013", "metal.00014", "metal.00015", "metal.00022", - "metal.00023", "metal.00025", "metal.00026", "metal.00027", "metal.00028", "metal.00029", - "metal.00030", "metal.00031", "metal.00032", "metal.00033", "metal.00038", "metal.00039", - "metal.00067", "metal.00070", "metal.00073", "metal.00074", "metal.00075", "metal.00078", - "metal.00083", "metal.00085", "metal.00087", "metal.00088", "pop.00000", "pop.00001", - "pop.00013", "pop.00014", "pop.00043", "pop.00063", "pop.00064", "pop.00065", - "pop.00066", "pop.00069", "pop.00070", "pop.00071", "pop.00072", "pop.00073", - "pop.00074", "pop.00075", "pop.00076", "pop.00077", "pop.00078", "pop.00079", - "pop.00082", "pop.00088", "pop.00089", "pop.00090", "pop.00091", "pop.00092", - "pop.00093", "pop.00094", "pop.00095", "pop.00096", "reggae.00034", "reggae.00035", - "reggae.00036", "reggae.00037", "reggae.00038", "reggae.00039", "reggae.00040", "reggae.00046", - "reggae.00047", "reggae.00048", "reggae.00052", "reggae.00053", "reggae.00064", "reggae.00065", - "reggae.00066", "reggae.00067", "reggae.00068", "reggae.00071", "reggae.00079", "reggae.00082", - "reggae.00083", "reggae.00084", "reggae.00087", "reggae.00088", "reggae.00089", "reggae.00090", - "rock.00010", "rock.00011", "rock.00012", "rock.00013", "rock.00014", "rock.00015", - "rock.00027", "rock.00028", "rock.00029", "rock.00030", "rock.00031", "rock.00032", - "rock.00033", "rock.00034", "rock.00035", "rock.00036", "rock.00037", "rock.00039", - "rock.00040", "rock.00041", "rock.00042", "rock.00043", "rock.00044", "rock.00045", - "rock.00046", "rock.00047", "rock.00048", "rock.00086", "rock.00087", "rock.00088", - "rock.00089", "rock.00090", -}; - -const std::vector filtered_train = { - "blues.00029", "blues.00030", "blues.00031", "blues.00032", "blues.00033", "blues.00034", - "blues.00035", "blues.00036", "blues.00037", "blues.00038", "blues.00039", "blues.00040", - "blues.00041", "blues.00042", "blues.00043", "blues.00044", "blues.00045", "blues.00046", - "blues.00047", "blues.00048", "blues.00049", "blues.00073", "blues.00074", "blues.00075", - "blues.00076", "blues.00077", "blues.00078", "blues.00079", "blues.00080", "blues.00081", - "blues.00082", "blues.00083", "blues.00084", "blues.00085", "blues.00086", "blues.00087", - "blues.00088", "blues.00089", "blues.00090", "blues.00091", "blues.00092", "blues.00093", - "blues.00094", "blues.00095", "blues.00096", "blues.00097", "classical.00030", "classical.00031", - "classical.00032", "classical.00033", "classical.00043", "classical.00044", "classical.00045", "classical.00046", - "classical.00047", "classical.00048", "classical.00050", "classical.00051", "classical.00052", "classical.00053", - "classical.00054", "classical.00055", "classical.00056", "classical.00057", "classical.00058", "classical.00059", - "classical.00060", "classical.00061", "classical.00062", "classical.00063", "classical.00064", "classical.00065", - "classical.00066", "classical.00067", "classical.00080", "classical.00081", "classical.00082", "classical.00083", - "classical.00084", "classical.00085", "classical.00086", "classical.00087", "classical.00088", "classical.00089", - "classical.00090", "classical.00091", "classical.00092", "classical.00093", "classical.00094", "classical.00095", - "classical.00096", "classical.00097", "classical.00098", "classical.00099", "country.00019", "country.00020", - "country.00021", "country.00022", "country.00023", "country.00024", "country.00025", 
"country.00026", - "country.00028", "country.00029", "country.00065", "country.00066", "country.00067", "country.00068", - "country.00069", "country.00070", "country.00071", "country.00072", "country.00073", "country.00074", - "country.00075", "country.00076", "country.00077", "country.00078", "country.00079", "country.00080", - "country.00081", "country.00082", "country.00083", "country.00084", "country.00085", "country.00086", - "country.00087", "country.00088", "country.00089", "country.00090", "country.00091", "country.00092", - "country.00093", "country.00094", "country.00095", "country.00096", "country.00097", "country.00098", - "country.00099", "disco.00005", "disco.00015", "disco.00016", "disco.00017", "disco.00018", - "disco.00019", "disco.00020", "disco.00022", "disco.00023", "disco.00024", "disco.00025", - "disco.00026", "disco.00027", "disco.00028", "disco.00029", "disco.00030", "disco.00031", - "disco.00032", "disco.00033", "disco.00034", "disco.00035", "disco.00036", "disco.00037", - "disco.00039", "disco.00040", "disco.00041", "disco.00042", "disco.00043", "disco.00044", - "disco.00045", "disco.00047", "disco.00049", "disco.00053", "disco.00054", "disco.00056", - "disco.00057", "disco.00059", "disco.00061", "disco.00070", "disco.00073", "disco.00074", - "disco.00089", "hiphop.00002", "hiphop.00003", "hiphop.00004", "hiphop.00005", "hiphop.00006", - "hiphop.00007", "hiphop.00008", "hiphop.00009", "hiphop.00010", "hiphop.00011", "hiphop.00012", - "hiphop.00013", "hiphop.00014", "hiphop.00015", "hiphop.00016", "hiphop.00017", "hiphop.00018", - "hiphop.00019", "hiphop.00020", "hiphop.00021", "hiphop.00022", "hiphop.00023", "hiphop.00024", - "hiphop.00025", "hiphop.00028", "hiphop.00029", "hiphop.00031", "hiphop.00032", "hiphop.00033", - "hiphop.00034", "hiphop.00035", "hiphop.00036", "hiphop.00037", "hiphop.00038", "hiphop.00041", - "hiphop.00042", "hiphop.00055", "hiphop.00056", "hiphop.00057", "hiphop.00058", "hiphop.00059", - "hiphop.00060", "hiphop.00061", "hiphop.00077", "hiphop.00078", "hiphop.00079", "hiphop.00080", - "jazz.00000", "jazz.00001", "jazz.00011", "jazz.00012", "jazz.00013", "jazz.00014", - "jazz.00015", "jazz.00016", "jazz.00017", "jazz.00018", "jazz.00019", "jazz.00020", - "jazz.00021", "jazz.00022", "jazz.00023", "jazz.00024", "jazz.00041", "jazz.00047", - "jazz.00048", "jazz.00049", "jazz.00050", "jazz.00051", "jazz.00052", "jazz.00053", - "jazz.00054", "jazz.00055", "jazz.00056", "jazz.00057", "jazz.00058", "jazz.00059", - "jazz.00060", "jazz.00061", "jazz.00062", "jazz.00063", "jazz.00064", "jazz.00065", - "jazz.00066", "jazz.00067", "jazz.00068", "jazz.00069", "jazz.00070", "jazz.00071", - "jazz.00072", "metal.00002", "metal.00003", "metal.00005", "metal.00021", "metal.00024", - "metal.00035", "metal.00046", "metal.00047", "metal.00048", "metal.00049", "metal.00050", - "metal.00051", "metal.00052", "metal.00053", "metal.00054", "metal.00055", "metal.00056", - "metal.00057", "metal.00059", "metal.00060", "metal.00061", "metal.00062", "metal.00063", - "metal.00064", "metal.00065", "metal.00066", "metal.00069", "metal.00071", "metal.00072", - "metal.00079", "metal.00080", "metal.00084", "metal.00086", "metal.00089", "metal.00090", - "metal.00091", "metal.00092", "metal.00093", "metal.00094", "metal.00095", "metal.00096", - "metal.00097", "metal.00098", "metal.00099", "pop.00002", "pop.00003", "pop.00004", - "pop.00005", "pop.00006", "pop.00007", "pop.00008", "pop.00009", "pop.00011", - "pop.00012", "pop.00016", "pop.00017", "pop.00018", "pop.00019", 
"pop.00020", - "pop.00023", "pop.00024", "pop.00025", "pop.00026", "pop.00027", "pop.00028", - "pop.00029", "pop.00031", "pop.00032", "pop.00033", "pop.00034", "pop.00035", - "pop.00036", "pop.00038", "pop.00039", "pop.00040", "pop.00041", "pop.00042", - "pop.00044", "pop.00046", "pop.00049", "pop.00050", "pop.00080", "pop.00097", - "pop.00098", "pop.00099", "reggae.00000", "reggae.00001", "reggae.00002", "reggae.00004", - "reggae.00006", "reggae.00009", "reggae.00011", "reggae.00012", "reggae.00014", "reggae.00015", - "reggae.00016", "reggae.00017", "reggae.00018", "reggae.00019", "reggae.00020", "reggae.00021", - "reggae.00022", "reggae.00023", "reggae.00024", "reggae.00025", "reggae.00026", "reggae.00027", - "reggae.00028", "reggae.00029", "reggae.00030", "reggae.00031", "reggae.00032", "reggae.00042", - "reggae.00043", "reggae.00044", "reggae.00045", "reggae.00049", "reggae.00050", "reggae.00051", - "reggae.00054", "reggae.00055", "reggae.00056", "reggae.00057", "reggae.00058", "reggae.00059", - "reggae.00060", "reggae.00063", "reggae.00069", "rock.00000", "rock.00001", "rock.00002", - "rock.00003", "rock.00004", "rock.00005", "rock.00006", "rock.00007", "rock.00008", - "rock.00009", "rock.00016", "rock.00017", "rock.00018", "rock.00019", "rock.00020", - "rock.00021", "rock.00022", "rock.00023", "rock.00024", "rock.00025", "rock.00026", - "rock.00057", "rock.00058", "rock.00059", "rock.00060", "rock.00061", "rock.00062", - "rock.00063", "rock.00064", "rock.00065", "rock.00066", "rock.00067", "rock.00068", - "rock.00069", "rock.00070", "rock.00091", "rock.00092", "rock.00093", "rock.00094", - "rock.00095", "rock.00096", "rock.00097", "rock.00098", "rock.00099", -}; - -const std::vector filtered_valid = { - "blues.00000", "blues.00001", "blues.00002", "blues.00003", "blues.00004", "blues.00005", - "blues.00006", "blues.00007", "blues.00008", "blues.00009", "blues.00010", "blues.00011", - "blues.00050", "blues.00051", "blues.00052", "blues.00053", "blues.00054", "blues.00055", - "blues.00056", "blues.00057", "blues.00058", "blues.00059", "blues.00060", "classical.00000", - "classical.00001", "classical.00002", "classical.00003", "classical.00004", "classical.00005", "classical.00006", - "classical.00007", "classical.00008", "classical.00009", "classical.00010", "classical.00068", "classical.00069", - "classical.00070", "classical.00071", "classical.00072", "classical.00073", "classical.00074", "classical.00075", - "classical.00076", "country.00000", "country.00001", "country.00002", "country.00003", "country.00004", - "country.00005", "country.00006", "country.00007", "country.00009", "country.00010", "country.00011", - "country.00012", "country.00013", "country.00014", "country.00015", "country.00016", "country.00017", - "country.00018", "country.00027", "country.00041", "country.00042", "country.00045", "country.00049", - "disco.00000", "disco.00002", "disco.00003", "disco.00004", "disco.00006", "disco.00007", - "disco.00008", "disco.00009", "disco.00010", "disco.00011", "disco.00012", "disco.00013", - "disco.00014", "disco.00046", "disco.00048", "disco.00052", "disco.00067", "disco.00068", - "disco.00072", "disco.00075", "disco.00090", "disco.00095", "hiphop.00081", "hiphop.00082", - "hiphop.00083", "hiphop.00084", "hiphop.00085", "hiphop.00086", "hiphop.00087", "hiphop.00088", - "hiphop.00089", "hiphop.00090", "hiphop.00091", "hiphop.00092", "hiphop.00093", "hiphop.00094", - "hiphop.00095", "hiphop.00096", "hiphop.00097", "hiphop.00098", "jazz.00002", "jazz.00003", - "jazz.00004", 
"jazz.00005", "jazz.00006", "jazz.00007", "jazz.00008", "jazz.00009", - "jazz.00010", "jazz.00025", "jazz.00026", "jazz.00027", "jazz.00028", "jazz.00029", - "jazz.00030", "jazz.00031", "jazz.00032", "metal.00000", "metal.00001", "metal.00006", - "metal.00007", "metal.00008", "metal.00009", "metal.00010", "metal.00011", "metal.00016", - "metal.00017", "metal.00018", "metal.00019", "metal.00020", "metal.00036", "metal.00037", - "metal.00068", "metal.00076", "metal.00077", "metal.00081", "metal.00082", "pop.00010", - "pop.00053", "pop.00055", "pop.00058", "pop.00059", "pop.00060", "pop.00061", - "pop.00062", "pop.00081", "pop.00083", "pop.00084", "pop.00085", "pop.00086", - "reggae.00061", "reggae.00062", "reggae.00070", "reggae.00072", "reggae.00074", "reggae.00076", - "reggae.00077", "reggae.00078", "reggae.00085", "reggae.00092", "reggae.00093", "reggae.00094", - "reggae.00095", "reggae.00096", "reggae.00097", "reggae.00098", "reggae.00099", "rock.00038", - "rock.00049", "rock.00050", "rock.00051", "rock.00052", "rock.00053", "rock.00054", - "rock.00055", "rock.00056", "rock.00071", "rock.00072", "rock.00073", "rock.00074", - "rock.00075", "rock.00076", "rock.00077", "rock.00078", "rock.00079", "rock.00080", - "rock.00081", "rock.00082", "rock.00083", "rock.00084", "rock.00085", -}; - -GTZANOp::GTZANOp(const std::string &usage, int32_t num_workers, const std::string &folder_path, int32_t queue_size, - std::unique_ptr data_schema, std::shared_ptr sampler) - : MappableLeafOp(num_workers, queue_size, std::move(sampler)), - usage_(usage), - folder_path_(folder_path), - data_schema_(std::move(data_schema)) {} - -Status GTZANOp::LoadTensorRow(row_id_type row_id, TensorRow *trow) { - RETURN_UNEXPECTED_IF_NULL(trow); - const uint32_t sample_rate = 22050; - std::shared_ptr waveform, rate, label; - RETURN_IF_NOT_OK(ReadAudio(audio_names_[row_id].first, &waveform)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(sample_rate, &rate)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(audio_names_[row_id].second, &label)); - (*trow) = TensorRow(row_id, {std::move(waveform), std::move(rate), std::move(label)}); - trow->setPath({audio_names_[row_id].first, audio_names_[row_id].first, audio_names_[row_id].first}); - return Status::OK(); -} - -void GTZANOp::Print(std::ostream &out, bool show_all) const { - if (!show_all) { - ParallelOp::Print(out, show_all); - out << "\n"; - return; - } - ParallelOp::Print(out, show_all); - out << "\nNumber of rows: " << num_rows_ << "\nGTZAN directory: " << folder_path_ << "\n\n"; -} - -Status GTZANOp::CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count) { - RETURN_UNEXPECTED_IF_NULL(count); - *count = 0; - const int64_t num_samples = 0; - const int64_t start_index = 0; - auto sampler = std::make_shared(start_index, num_samples); - auto schema = std::make_unique(); - - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT32), TensorImpl::kCv, 1))); - TensorShape scalar_rate = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("sample_rate", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_rate))); - TensorShape scalar_label = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK( - schema->AddColumn(ColDescriptor("label", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_label))); - - std::shared_ptr cfg = GlobalContext::config_manager(); - int32_t num_workers = cfg->num_parallel_workers(); - int32_t op_connect_size = cfg->op_connector_size(); - auto op = 
std::make_shared(usage, num_workers, dir, op_connect_size, std::move(schema), std::move(sampler)); - RETURN_IF_NOT_OK(op->PrepareData()); - *count = op->audio_names_.size(); - return Status::OK(); -} - -Status GTZANOp::ComputeColMap() { - if (column_name_id_map_.empty()) { - for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) { - column_name_id_map_[data_schema_->Column(i).Name()] = i; - } - } else { - MS_LOG(WARNING) << "Column name map is already set!"; - } - return Status::OK(); -} - -Status GTZANOp::ReadAudio(const std::string &audio_dir, std::shared_ptr *waveform) { - RETURN_UNEXPECTED_IF_NULL(waveform); - const int32_t kWavFileSampleRate = 22050; - int32_t sample_rate = 0; - std::vector waveform_vec; - RETURN_IF_NOT_OK(ReadWaveFile(audio_dir, &waveform_vec, &sample_rate)); - CHECK_FAIL_RETURN_UNEXPECTED(sample_rate == kWavFileSampleRate, - "Invalid file, sampling rate of GTZAN wav file must be 22050, file path: " + audio_dir); - RETURN_IF_NOT_OK(Tensor::CreateFromVector(waveform_vec, waveform)); - RETURN_IF_NOT_OK((*waveform)->ExpandDim(0)); - return Status::OK(); -} - -Status GTZANOp::PrepareData() { - auto realpath = FileUtils::GetRealPath(folder_path_.c_str()); - if (!realpath.has_value()) { - MS_LOG(ERROR) << "Invalid file path, GTZAN Dataset dir: " << folder_path_ << " does not exist."; - RETURN_STATUS_UNEXPECTED("Invalid file path, GTZAN Dataset dir: " + folder_path_ + " does not exist."); - } - Path dir(folder_path_); - - if (usage_ == "all") { - for (std::string sub_directory : genres) { - Path full_dir = dir / sub_directory; - if (!full_dir.Exists() || !full_dir.IsDirectory()) { - continue; - } - auto dir_it = Path::DirIterator::OpenDirectory(&full_dir); - if (dir_it != nullptr) { - while (dir_it->HasNext()) { - Path file = dir_it->Next(); - std::string file_name = file.ToString(); - auto pos = file_name.find_last_of('.'); - std::string name = file_name.substr(0, pos), temp_ext = file_name.substr(pos); - if (temp_ext == ".wav" && name.find('.') != std::string::npos) { - audio_names_.push_back({file.ToString(), sub_directory}); - } else { - MS_LOG(WARNING) << "Invalid file, invalid file name or file type: " << file.ToString() << "."; - } - } - } else { - MS_LOG(WARNING) << "Invalid file path, unable to open directory: " << full_dir.ToString() << "."; - } - } - } else { - const std::vector *files_point = nullptr; - if (usage_ == "test") { - files_point = &filtered_test; - } else if (usage_ == "train") { - files_point = &filtered_train; - } else { - files_point = &filtered_valid; - } - std::string ext = ".wav"; - for (auto sub_file_name : *files_point) { - auto pos = sub_file_name.find_first_of('.'); - std::string cls = sub_file_name.substr(0, pos); - Path full_dir = dir / cls / (sub_file_name + ext); - if (full_dir.Exists()) { - audio_names_.push_back({full_dir.ToString(), cls}); - } else { - MS_LOG(WARNING) << "The audio file is lost, file name= " << (sub_file_name + ext); - } - } - } - num_rows_ = audio_names_.size(); - CHECK_FAIL_RETURN_UNEXPECTED(num_rows_ > 0, "Invalid data, no valid data found in path:" + folder_path_); - return Status::OK(); -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/gtzan_op.h" + +#include +#include + +#include "mindspore-lite/minddata/dataset/audio/kernels/audio_utils.h" +#include "mindspore-lite/minddata/dataset/core/config_manager.h" +#include "mindspore-lite/minddata/dataset/core/tensor_shape.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" +#include "mindspore-lite/minddata/dataset/engine/execution_tree.h" +#include "utils/file_utils.h" + +namespace mindspore::lite { +namespace dataset { +const std::vector genres = { + "blues", "classical", "country", "disco", "hiphop", "jazz", "metal", "pop", "reggae", "rock", +}; + +const std::vector filtered_test = { + "blues.00012", "blues.00013", "blues.00014", "blues.00015", "blues.00016", "blues.00017", + "blues.00018", "blues.00019", "blues.00020", "blues.00021", "blues.00022", "blues.00023", + "blues.00024", "blues.00025", "blues.00026", "blues.00027", "blues.00028", "blues.00061", + "blues.00062", "blues.00063", "blues.00064", "blues.00065", "blues.00066", "blues.00067", + "blues.00068", "blues.00069", "blues.00070", "blues.00071", "blues.00072", "blues.00098", + "blues.00099", "classical.00011", "classical.00012", "classical.00013", "classical.00014", "classical.00015", + "classical.00016", "classical.00017", "classical.00018", "classical.00019", "classical.00020", "classical.00021", + "classical.00022", "classical.00023", "classical.00024", "classical.00025", "classical.00026", "classical.00027", + "classical.00028", "classical.00029", "classical.00034", "classical.00035", "classical.00036", "classical.00037", + "classical.00038", "classical.00039", "classical.00040", "classical.00041", "classical.00049", "classical.00077", + "classical.00078", "classical.00079", "country.00030", "country.00031", "country.00032", "country.00033", + "country.00034", "country.00035", "country.00036", "country.00037", "country.00038", "country.00039", + "country.00040", "country.00043", "country.00044", "country.00046", "country.00047", "country.00048", + "country.00050", "country.00051", "country.00053", "country.00054", "country.00055", "country.00056", + "country.00057", "country.00058", "country.00059", "country.00060", "country.00061", "country.00062", + "country.00063", "country.00064", "disco.00001", "disco.00021", "disco.00058", "disco.00062", + "disco.00063", "disco.00064", "disco.00065", "disco.00066", "disco.00069", "disco.00076", + "disco.00077", "disco.00078", "disco.00079", "disco.00080", "disco.00081", "disco.00082", + "disco.00083", "disco.00084", "disco.00085", "disco.00086", "disco.00087", "disco.00088", + "disco.00091", "disco.00092", "disco.00093", "disco.00094", "disco.00096", "disco.00097", + "disco.00099", "hiphop.00000", "hiphop.00026", "hiphop.00027", "hiphop.00030", "hiphop.00040", + "hiphop.00043", "hiphop.00044", "hiphop.00045", "hiphop.00051", "hiphop.00052", "hiphop.00053", + "hiphop.00054", "hiphop.00062", "hiphop.00063", "hiphop.00064", "hiphop.00065", "hiphop.00066", + "hiphop.00067", "hiphop.00068", "hiphop.00069", "hiphop.00070", "hiphop.00071", 
"hiphop.00072", + "hiphop.00073", "hiphop.00074", "hiphop.00075", "hiphop.00099", "jazz.00073", "jazz.00074", + "jazz.00075", "jazz.00076", "jazz.00077", "jazz.00078", "jazz.00079", "jazz.00080", + "jazz.00081", "jazz.00082", "jazz.00083", "jazz.00084", "jazz.00085", "jazz.00086", + "jazz.00087", "jazz.00088", "jazz.00089", "jazz.00090", "jazz.00091", "jazz.00092", + "jazz.00093", "jazz.00094", "jazz.00095", "jazz.00096", "jazz.00097", "jazz.00098", + "jazz.00099", "metal.00012", "metal.00013", "metal.00014", "metal.00015", "metal.00022", + "metal.00023", "metal.00025", "metal.00026", "metal.00027", "metal.00028", "metal.00029", + "metal.00030", "metal.00031", "metal.00032", "metal.00033", "metal.00038", "metal.00039", + "metal.00067", "metal.00070", "metal.00073", "metal.00074", "metal.00075", "metal.00078", + "metal.00083", "metal.00085", "metal.00087", "metal.00088", "pop.00000", "pop.00001", + "pop.00013", "pop.00014", "pop.00043", "pop.00063", "pop.00064", "pop.00065", + "pop.00066", "pop.00069", "pop.00070", "pop.00071", "pop.00072", "pop.00073", + "pop.00074", "pop.00075", "pop.00076", "pop.00077", "pop.00078", "pop.00079", + "pop.00082", "pop.00088", "pop.00089", "pop.00090", "pop.00091", "pop.00092", + "pop.00093", "pop.00094", "pop.00095", "pop.00096", "reggae.00034", "reggae.00035", + "reggae.00036", "reggae.00037", "reggae.00038", "reggae.00039", "reggae.00040", "reggae.00046", + "reggae.00047", "reggae.00048", "reggae.00052", "reggae.00053", "reggae.00064", "reggae.00065", + "reggae.00066", "reggae.00067", "reggae.00068", "reggae.00071", "reggae.00079", "reggae.00082", + "reggae.00083", "reggae.00084", "reggae.00087", "reggae.00088", "reggae.00089", "reggae.00090", + "rock.00010", "rock.00011", "rock.00012", "rock.00013", "rock.00014", "rock.00015", + "rock.00027", "rock.00028", "rock.00029", "rock.00030", "rock.00031", "rock.00032", + "rock.00033", "rock.00034", "rock.00035", "rock.00036", "rock.00037", "rock.00039", + "rock.00040", "rock.00041", "rock.00042", "rock.00043", "rock.00044", "rock.00045", + "rock.00046", "rock.00047", "rock.00048", "rock.00086", "rock.00087", "rock.00088", + "rock.00089", "rock.00090", +}; + +const std::vector filtered_train = { + "blues.00029", "blues.00030", "blues.00031", "blues.00032", "blues.00033", "blues.00034", + "blues.00035", "blues.00036", "blues.00037", "blues.00038", "blues.00039", "blues.00040", + "blues.00041", "blues.00042", "blues.00043", "blues.00044", "blues.00045", "blues.00046", + "blues.00047", "blues.00048", "blues.00049", "blues.00073", "blues.00074", "blues.00075", + "blues.00076", "blues.00077", "blues.00078", "blues.00079", "blues.00080", "blues.00081", + "blues.00082", "blues.00083", "blues.00084", "blues.00085", "blues.00086", "blues.00087", + "blues.00088", "blues.00089", "blues.00090", "blues.00091", "blues.00092", "blues.00093", + "blues.00094", "blues.00095", "blues.00096", "blues.00097", "classical.00030", "classical.00031", + "classical.00032", "classical.00033", "classical.00043", "classical.00044", "classical.00045", "classical.00046", + "classical.00047", "classical.00048", "classical.00050", "classical.00051", "classical.00052", "classical.00053", + "classical.00054", "classical.00055", "classical.00056", "classical.00057", "classical.00058", "classical.00059", + "classical.00060", "classical.00061", "classical.00062", "classical.00063", "classical.00064", "classical.00065", + "classical.00066", "classical.00067", "classical.00080", "classical.00081", "classical.00082", "classical.00083", + 
"classical.00084", "classical.00085", "classical.00086", "classical.00087", "classical.00088", "classical.00089", + "classical.00090", "classical.00091", "classical.00092", "classical.00093", "classical.00094", "classical.00095", + "classical.00096", "classical.00097", "classical.00098", "classical.00099", "country.00019", "country.00020", + "country.00021", "country.00022", "country.00023", "country.00024", "country.00025", "country.00026", + "country.00028", "country.00029", "country.00065", "country.00066", "country.00067", "country.00068", + "country.00069", "country.00070", "country.00071", "country.00072", "country.00073", "country.00074", + "country.00075", "country.00076", "country.00077", "country.00078", "country.00079", "country.00080", + "country.00081", "country.00082", "country.00083", "country.00084", "country.00085", "country.00086", + "country.00087", "country.00088", "country.00089", "country.00090", "country.00091", "country.00092", + "country.00093", "country.00094", "country.00095", "country.00096", "country.00097", "country.00098", + "country.00099", "disco.00005", "disco.00015", "disco.00016", "disco.00017", "disco.00018", + "disco.00019", "disco.00020", "disco.00022", "disco.00023", "disco.00024", "disco.00025", + "disco.00026", "disco.00027", "disco.00028", "disco.00029", "disco.00030", "disco.00031", + "disco.00032", "disco.00033", "disco.00034", "disco.00035", "disco.00036", "disco.00037", + "disco.00039", "disco.00040", "disco.00041", "disco.00042", "disco.00043", "disco.00044", + "disco.00045", "disco.00047", "disco.00049", "disco.00053", "disco.00054", "disco.00056", + "disco.00057", "disco.00059", "disco.00061", "disco.00070", "disco.00073", "disco.00074", + "disco.00089", "hiphop.00002", "hiphop.00003", "hiphop.00004", "hiphop.00005", "hiphop.00006", + "hiphop.00007", "hiphop.00008", "hiphop.00009", "hiphop.00010", "hiphop.00011", "hiphop.00012", + "hiphop.00013", "hiphop.00014", "hiphop.00015", "hiphop.00016", "hiphop.00017", "hiphop.00018", + "hiphop.00019", "hiphop.00020", "hiphop.00021", "hiphop.00022", "hiphop.00023", "hiphop.00024", + "hiphop.00025", "hiphop.00028", "hiphop.00029", "hiphop.00031", "hiphop.00032", "hiphop.00033", + "hiphop.00034", "hiphop.00035", "hiphop.00036", "hiphop.00037", "hiphop.00038", "hiphop.00041", + "hiphop.00042", "hiphop.00055", "hiphop.00056", "hiphop.00057", "hiphop.00058", "hiphop.00059", + "hiphop.00060", "hiphop.00061", "hiphop.00077", "hiphop.00078", "hiphop.00079", "hiphop.00080", + "jazz.00000", "jazz.00001", "jazz.00011", "jazz.00012", "jazz.00013", "jazz.00014", + "jazz.00015", "jazz.00016", "jazz.00017", "jazz.00018", "jazz.00019", "jazz.00020", + "jazz.00021", "jazz.00022", "jazz.00023", "jazz.00024", "jazz.00041", "jazz.00047", + "jazz.00048", "jazz.00049", "jazz.00050", "jazz.00051", "jazz.00052", "jazz.00053", + "jazz.00054", "jazz.00055", "jazz.00056", "jazz.00057", "jazz.00058", "jazz.00059", + "jazz.00060", "jazz.00061", "jazz.00062", "jazz.00063", "jazz.00064", "jazz.00065", + "jazz.00066", "jazz.00067", "jazz.00068", "jazz.00069", "jazz.00070", "jazz.00071", + "jazz.00072", "metal.00002", "metal.00003", "metal.00005", "metal.00021", "metal.00024", + "metal.00035", "metal.00046", "metal.00047", "metal.00048", "metal.00049", "metal.00050", + "metal.00051", "metal.00052", "metal.00053", "metal.00054", "metal.00055", "metal.00056", + "metal.00057", "metal.00059", "metal.00060", "metal.00061", "metal.00062", "metal.00063", + "metal.00064", "metal.00065", "metal.00066", "metal.00069", "metal.00071", 
"metal.00072", + "metal.00079", "metal.00080", "metal.00084", "metal.00086", "metal.00089", "metal.00090", + "metal.00091", "metal.00092", "metal.00093", "metal.00094", "metal.00095", "metal.00096", + "metal.00097", "metal.00098", "metal.00099", "pop.00002", "pop.00003", "pop.00004", + "pop.00005", "pop.00006", "pop.00007", "pop.00008", "pop.00009", "pop.00011", + "pop.00012", "pop.00016", "pop.00017", "pop.00018", "pop.00019", "pop.00020", + "pop.00023", "pop.00024", "pop.00025", "pop.00026", "pop.00027", "pop.00028", + "pop.00029", "pop.00031", "pop.00032", "pop.00033", "pop.00034", "pop.00035", + "pop.00036", "pop.00038", "pop.00039", "pop.00040", "pop.00041", "pop.00042", + "pop.00044", "pop.00046", "pop.00049", "pop.00050", "pop.00080", "pop.00097", + "pop.00098", "pop.00099", "reggae.00000", "reggae.00001", "reggae.00002", "reggae.00004", + "reggae.00006", "reggae.00009", "reggae.00011", "reggae.00012", "reggae.00014", "reggae.00015", + "reggae.00016", "reggae.00017", "reggae.00018", "reggae.00019", "reggae.00020", "reggae.00021", + "reggae.00022", "reggae.00023", "reggae.00024", "reggae.00025", "reggae.00026", "reggae.00027", + "reggae.00028", "reggae.00029", "reggae.00030", "reggae.00031", "reggae.00032", "reggae.00042", + "reggae.00043", "reggae.00044", "reggae.00045", "reggae.00049", "reggae.00050", "reggae.00051", + "reggae.00054", "reggae.00055", "reggae.00056", "reggae.00057", "reggae.00058", "reggae.00059", + "reggae.00060", "reggae.00063", "reggae.00069", "rock.00000", "rock.00001", "rock.00002", + "rock.00003", "rock.00004", "rock.00005", "rock.00006", "rock.00007", "rock.00008", + "rock.00009", "rock.00016", "rock.00017", "rock.00018", "rock.00019", "rock.00020", + "rock.00021", "rock.00022", "rock.00023", "rock.00024", "rock.00025", "rock.00026", + "rock.00057", "rock.00058", "rock.00059", "rock.00060", "rock.00061", "rock.00062", + "rock.00063", "rock.00064", "rock.00065", "rock.00066", "rock.00067", "rock.00068", + "rock.00069", "rock.00070", "rock.00091", "rock.00092", "rock.00093", "rock.00094", + "rock.00095", "rock.00096", "rock.00097", "rock.00098", "rock.00099", +}; + +const std::vector filtered_valid = { + "blues.00000", "blues.00001", "blues.00002", "blues.00003", "blues.00004", "blues.00005", + "blues.00006", "blues.00007", "blues.00008", "blues.00009", "blues.00010", "blues.00011", + "blues.00050", "blues.00051", "blues.00052", "blues.00053", "blues.00054", "blues.00055", + "blues.00056", "blues.00057", "blues.00058", "blues.00059", "blues.00060", "classical.00000", + "classical.00001", "classical.00002", "classical.00003", "classical.00004", "classical.00005", "classical.00006", + "classical.00007", "classical.00008", "classical.00009", "classical.00010", "classical.00068", "classical.00069", + "classical.00070", "classical.00071", "classical.00072", "classical.00073", "classical.00074", "classical.00075", + "classical.00076", "country.00000", "country.00001", "country.00002", "country.00003", "country.00004", + "country.00005", "country.00006", "country.00007", "country.00009", "country.00010", "country.00011", + "country.00012", "country.00013", "country.00014", "country.00015", "country.00016", "country.00017", + "country.00018", "country.00027", "country.00041", "country.00042", "country.00045", "country.00049", + "disco.00000", "disco.00002", "disco.00003", "disco.00004", "disco.00006", "disco.00007", + "disco.00008", "disco.00009", "disco.00010", "disco.00011", "disco.00012", "disco.00013", + "disco.00014", "disco.00046", "disco.00048", "disco.00052", 
"disco.00067", "disco.00068", + "disco.00072", "disco.00075", "disco.00090", "disco.00095", "hiphop.00081", "hiphop.00082", + "hiphop.00083", "hiphop.00084", "hiphop.00085", "hiphop.00086", "hiphop.00087", "hiphop.00088", + "hiphop.00089", "hiphop.00090", "hiphop.00091", "hiphop.00092", "hiphop.00093", "hiphop.00094", + "hiphop.00095", "hiphop.00096", "hiphop.00097", "hiphop.00098", "jazz.00002", "jazz.00003", + "jazz.00004", "jazz.00005", "jazz.00006", "jazz.00007", "jazz.00008", "jazz.00009", + "jazz.00010", "jazz.00025", "jazz.00026", "jazz.00027", "jazz.00028", "jazz.00029", + "jazz.00030", "jazz.00031", "jazz.00032", "metal.00000", "metal.00001", "metal.00006", + "metal.00007", "metal.00008", "metal.00009", "metal.00010", "metal.00011", "metal.00016", + "metal.00017", "metal.00018", "metal.00019", "metal.00020", "metal.00036", "metal.00037", + "metal.00068", "metal.00076", "metal.00077", "metal.00081", "metal.00082", "pop.00010", + "pop.00053", "pop.00055", "pop.00058", "pop.00059", "pop.00060", "pop.00061", + "pop.00062", "pop.00081", "pop.00083", "pop.00084", "pop.00085", "pop.00086", + "reggae.00061", "reggae.00062", "reggae.00070", "reggae.00072", "reggae.00074", "reggae.00076", + "reggae.00077", "reggae.00078", "reggae.00085", "reggae.00092", "reggae.00093", "reggae.00094", + "reggae.00095", "reggae.00096", "reggae.00097", "reggae.00098", "reggae.00099", "rock.00038", + "rock.00049", "rock.00050", "rock.00051", "rock.00052", "rock.00053", "rock.00054", + "rock.00055", "rock.00056", "rock.00071", "rock.00072", "rock.00073", "rock.00074", + "rock.00075", "rock.00076", "rock.00077", "rock.00078", "rock.00079", "rock.00080", + "rock.00081", "rock.00082", "rock.00083", "rock.00084", "rock.00085", +}; + +GTZANOp::GTZANOp(const std::string &usage, int32_t num_workers, const std::string &folder_path, int32_t queue_size, + std::unique_ptr data_schema, std::shared_ptr sampler) + : MappableLeafOp(num_workers, queue_size, std::move(sampler)), + usage_(usage), + folder_path_(folder_path), + data_schema_(std::move(data_schema)) {} + +Status GTZANOp::LoadTensorRow(row_id_type row_id, TensorRow *trow) { + RETURN_UNEXPECTED_IF_NULL(trow); + const uint32_t sample_rate = 22050; + std::shared_ptr waveform, rate, label; + RETURN_IF_NOT_OK(ReadAudio(audio_names_[row_id].first, &waveform)); + RETURN_IF_NOT_OK(Tensor::CreateScalar(sample_rate, &rate)); + RETURN_IF_NOT_OK(Tensor::CreateScalar(audio_names_[row_id].second, &label)); + (*trow) = TensorRow(row_id, {std::move(waveform), std::move(rate), std::move(label)}); + trow->setPath({audio_names_[row_id].first, audio_names_[row_id].first, audio_names_[row_id].first}); + return Status::OK(); +} + +void GTZANOp::Print(std::ostream &out, bool show_all) const { + if (!show_all) { + ParallelOp::Print(out, show_all); + out << "\n"; + return; + } + ParallelOp::Print(out, show_all); + out << "\nNumber of rows: " << num_rows_ << "\nGTZAN directory: " << folder_path_ << "\n\n"; +} + +Status GTZANOp::CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count) { + RETURN_UNEXPECTED_IF_NULL(count); + *count = 0; + const int64_t num_samples = 0; + const int64_t start_index = 0; + auto sampler = std::make_shared(start_index, num_samples); + auto schema = std::make_unique(); + + RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT32), TensorImpl::kCv, 1))); + TensorShape scalar_rate = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("sample_rate", DataType(DataType::DE_UINT32), 
TensorImpl::kFlexible, 0, &scalar_rate))); + TensorShape scalar_label = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK( + schema->AddColumn(ColDescriptor("label", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_label))); + + std::shared_ptr cfg = GlobalContext::config_manager(); + int32_t num_workers = cfg->num_parallel_workers(); + int32_t op_connect_size = cfg->op_connector_size(); + auto op = std::make_shared(usage, num_workers, dir, op_connect_size, std::move(schema), std::move(sampler)); + RETURN_IF_NOT_OK(op->PrepareData()); + *count = op->audio_names_.size(); + return Status::OK(); +} + +Status GTZANOp::ComputeColMap() { + if (column_name_id_map_.empty()) { + for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) { + column_name_id_map_[data_schema_->Column(i).Name()] = i; + } + } else { + MS_LOG(WARNING) << "Column name map is already set!"; + } + return Status::OK(); +} + +Status GTZANOp::ReadAudio(const std::string &audio_dir, std::shared_ptr *waveform) { + RETURN_UNEXPECTED_IF_NULL(waveform); + const int32_t kWavFileSampleRate = 22050; + int32_t sample_rate = 0; + std::vector waveform_vec; + RETURN_IF_NOT_OK(ReadWaveFile(audio_dir, &waveform_vec, &sample_rate)); + CHECK_FAIL_RETURN_UNEXPECTED(sample_rate == kWavFileSampleRate, + "Invalid file, sampling rate of GTZAN wav file must be 22050, file path: " + audio_dir); + RETURN_IF_NOT_OK(Tensor::CreateFromVector(waveform_vec, waveform)); + RETURN_IF_NOT_OK((*waveform)->ExpandDim(0)); + return Status::OK(); +} + +Status GTZANOp::PrepareData() { + auto realpath = FileUtils::GetRealPath(folder_path_.c_str()); + if (!realpath.has_value()) { + MS_LOG(ERROR) << "Invalid file path, GTZAN Dataset dir: " << folder_path_ << " does not exist."; + RETURN_STATUS_UNEXPECTED("Invalid file path, GTZAN Dataset dir: " + folder_path_ + " does not exist."); + } + Path dir(folder_path_); + + if (usage_ == "all") { + for (std::string sub_directory : genres) { + Path full_dir = dir / sub_directory; + if (!full_dir.Exists() || !full_dir.IsDirectory()) { + continue; + } + auto dir_it = Path::DirIterator::OpenDirectory(&full_dir); + if (dir_it != nullptr) { + while (dir_it->HasNext()) { + Path file = dir_it->Next(); + std::string file_name = file.ToString(); + auto pos = file_name.find_last_of('.'); + std::string name = file_name.substr(0, pos), temp_ext = file_name.substr(pos); + if (temp_ext == ".wav" && name.find('.') != std::string::npos) { + audio_names_.push_back({file.ToString(), sub_directory}); + } else { + MS_LOG(WARNING) << "Invalid file, invalid file name or file type: " << file.ToString() << "."; + } + } + } else { + MS_LOG(WARNING) << "Invalid file path, unable to open directory: " << full_dir.ToString() << "."; + } + } + } else { + const std::vector *files_point = nullptr; + if (usage_ == "test") { + files_point = &filtered_test; + } else if (usage_ == "train") { + files_point = &filtered_train; + } else { + files_point = &filtered_valid; + } + std::string ext = ".wav"; + for (auto sub_file_name : *files_point) { + auto pos = sub_file_name.find_first_of('.'); + std::string cls = sub_file_name.substr(0, pos); + Path full_dir = dir / cls / (sub_file_name + ext); + if (full_dir.Exists()) { + audio_names_.push_back({full_dir.ToString(), cls}); + } else { + MS_LOG(WARNING) << "The audio file is lost, file name= " << (sub_file_name + ext); + } + } + } + num_rows_ = audio_names_.size(); + CHECK_FAIL_RETURN_UNEXPECTED(num_rows_ > 0, "Invalid data, no valid data found in path:" + folder_path_); + return Status::OK(); +} +} // 
namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/gtzan_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/gtzan_op.h index 3aae85c4..8df3b057 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/gtzan_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/gtzan_op.h @@ -1,97 +1,97 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_GTZAN_OP_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_GTZAN_OP_H_ - -#include -#include -#include -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/core/tensor.h" -#include "mindspore-lite/minddata/dataset/engine/data_schema.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -#include "mindspore-lite/minddata/dataset/util/path.h" -#include "mindspore-lite/minddata/dataset/util/queue.h" -#include "mindspore-lite/minddata/dataset/util/status.h" -#include "mindspore-lite/minddata/dataset/util/wait_post.h" - -namespace mindspore { -namespace dataset { -class GTZANOp : public MappableLeafOp { - public: - /// \brief Constructor - /// \param[in] usage Usage of this dataset, can be 'train', 'valid', 'test', or 'all'. - /// \param[in] num_workers Number of workers reading audios in parallel. - /// \param[in] folder_path Dir directory of GTZAN. - /// \param[in] queue_size Connector queue size. - /// \param[in] data_schema The schema of the GTZAN dataset. - /// \param[in] sampler Sampler tells GTZANOp what to read. - GTZANOp(const std::string &usage, int32_t num_workers, const std::string &folder_path, int32_t queue_size, - std::unique_ptr data_schema, std::shared_ptr sampler); - - /// \Destructor. - ~GTZANOp() = default; - - /// \A print method typically used for debugging. - /// \param[out] out Output stream. - /// \param[in] show_all Whether to show all information. - void Print(std::ostream &out, bool show_all) const override; - - /// \Function to count the number of samples in the GTZAN dataset. - /// \param[in] dir Path to the GTZAN directory. - /// \param[in] usage Choose the subset of GTZAN dataset. - /// \param[out] count Output arg that will hold the actual dataset size. - /// \return Status The status code returned. - static Status CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count); - - /// \Op name getter. - /// \return Name of the current Op. - std::string Name() const override { return "GTZANOp"; } - - private: - /// \Load a tensor row according to a pair. - /// \param[in] row_id Id for this tensor row. - /// \param[out] row Audio & label read into this tensor row. - /// \return Status The status code returned. 
- Status LoadTensorRow(row_id_type row_id, TensorRow *row) override; - - /// \Parse a audio file. - /// \param[in] audio_dir Audio file path. - /// \param[out] waveform The output waveform tensor. - /// \return Status The status code returned. - Status ReadAudio(const std::string &audio_dir, std::shared_ptr *waveform); - - /// \Prepare data. - /// \return Status The status code returned. - Status PrepareData(); - - /// \Private function for computing the assignment of the column name map. - /// \return Status The status code returned. - Status ComputeColMap() override; - - const std::string usage_; - std::string folder_path_; - std::unique_ptr data_schema_; - std::vector> audio_names_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_GTZAN_OP_H_ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_GTZAN_OP_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_GTZAN_OP_H_ + +#include +#include +#include +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/core/tensor.h" +#include "mindspore-lite/minddata/dataset/engine/data_schema.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" +#include "mindspore-lite/minddata/dataset/util/path.h" +#include "mindspore-lite/minddata/dataset/util/queue.h" +#include "mindspore-lite/minddata/dataset/util/status.h" +#include "mindspore-lite/minddata/dataset/util/wait_post.h" + +namespace mindspore::lite { +namespace dataset { +class GTZANOp : public MappableLeafOp { + public: + /// \brief Constructor + /// \param[in] usage Usage of this dataset, can be 'train', 'valid', 'test', or 'all'. + /// \param[in] num_workers Number of workers reading audios in parallel. + /// \param[in] folder_path Dir directory of GTZAN. + /// \param[in] queue_size Connector queue size. + /// \param[in] data_schema The schema of the GTZAN dataset. + /// \param[in] sampler Sampler tells GTZANOp what to read. + GTZANOp(const std::string &usage, int32_t num_workers, const std::string &folder_path, int32_t queue_size, + std::unique_ptr data_schema, std::shared_ptr sampler); + + /// \Destructor. + ~GTZANOp() = default; + + /// \A print method typically used for debugging. + /// \param[out] out Output stream. + /// \param[in] show_all Whether to show all information. + void Print(std::ostream &out, bool show_all) const override; + + /// \Function to count the number of samples in the GTZAN dataset. + /// \param[in] dir Path to the GTZAN directory. + /// \param[in] usage Choose the subset of GTZAN dataset. + /// \param[out] count Output arg that will hold the actual dataset size. + /// \return Status The status code returned. 
+ static Status CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count); + + /// \Op name getter. + /// \return Name of the current Op. + std::string Name() const override { return "GTZANOp"; } + + private: + /// \Load a tensor row according to a pair. + /// \param[in] row_id Id for this tensor row. + /// \param[out] row Audio & label read into this tensor row. + /// \return Status The status code returned. + Status LoadTensorRow(row_id_type row_id, TensorRow *row) override; + + /// \Parse a audio file. + /// \param[in] audio_dir Audio file path. + /// \param[out] waveform The output waveform tensor. + /// \return Status The status code returned. + Status ReadAudio(const std::string &audio_dir, std::shared_ptr *waveform); + + /// \Prepare data. + /// \return Status The status code returned. + Status PrepareData(); + + /// \Private function for computing the assignment of the column name map. + /// \return Status The status code returned. + Status ComputeColMap() override; + + const std::string usage_; + std::string folder_path_; + std::unique_ptr data_schema_; + std::vector> audio_names_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_GTZAN_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/image_folder_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/image_folder_op.cc index 833a47b5..fc07b767 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/image_folder_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/image_folder_op.cc @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { #ifdef ENABLE_PYTHON ImageFolderOp::ImageFolderOp(int32_t num_wkrs, std::string file_dir, int32_t queue_size, bool recursive, bool do_decode, @@ -401,4 +401,4 @@ Status ImageFolderOp::GetClassIndexing( return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/image_folder_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/image_folder_op.h index ac09d6ed..c5497db5 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/image_folder_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/image_folder_op.h @@ -42,7 +42,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// Forward declares template @@ -176,5 +176,5 @@ class ImageFolderOp : public MappableLeafOp { #endif }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_IMAGE_FOLDER_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/imdb_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/imdb_op.cc index a235886d..599151f9 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/imdb_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/imdb_op.cc @@ -26,7 +26,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr int32_t kNumClasses = 2; @@ 
-229,4 +229,4 @@ Status IMDBOp::LoadTensor(const std::string &line, std::shared_ptr *out_ return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/imdb_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/imdb_op.h index 919615eb..511360a6 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/imdb_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/imdb_op.h @@ -38,7 +38,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// Forward declares template @@ -130,5 +130,5 @@ class IMDBOp : public MappableLeafOp { std::vector> text_label_pairs_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_IMDB_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.cc index 08c819f3..8aa2a21f 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.cc @@ -18,7 +18,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { // IOBlock Class // @@ -83,4 +83,4 @@ Status FilenameBlock::GetFilename(std::string *out_filename, const AutoIndexObj< return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.h index 290b481f..6a37576c 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.h @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/util/auto_index.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // The IOBlock class is used to describe a "unit of work" that a storage leaf operator worker thread // is responsible for acting on. 
@@ -143,5 +143,5 @@ class FilenameBlock : public IOBlock { int64_t end_offset_; }; // class TFBlock } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_IO_BLOCK_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/iwslt_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/iwslt_op.cc index e545b0c7..f8bd5182 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/iwslt_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/iwslt_op.cc @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { IWSLTOp::IWSLTOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, int32_t op_connector_size, bool shuffle_files, int32_t num_devices, int32_t device_id, std::unique_ptr data_schema, @@ -540,4 +540,4 @@ Status IWSLTOp::GetFiles() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/iwslt_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/iwslt_op.h index ad514a3d..db0d7ba7 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/iwslt_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/iwslt_op.h @@ -34,7 +34,7 @@ using tinyxml2::XMLDocument; using tinyxml2::XMLElement; using tinyxml2::XMLError; -namespace mindspore { +namespace mindspore::lite { namespace dataset { class JaggedConnector; @@ -232,5 +232,5 @@ class IWSLTOp : public NonMappableLeafOp { std::string test_set_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_IWSLT_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/kitti_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/kitti_op.cc index 758e048a..34456eb0 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/kitti_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/kitti_op.cc @@ -28,7 +28,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr int kLabelNameIndex = 0; constexpr int kTruncatedIndex = 1; @@ -320,4 +320,4 @@ Status KITTIOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/kitti_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/kitti_op.h index 65d98a61..4255379e 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/kitti_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/kitti_op.h @@ -33,7 +33,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declares template @@ -122,5 +122,5 @@ class KITTIOp : public MappableLeafOp { std::map annotation_map_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_KITTI_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/kmnist_op.cc 
b/mindspore-lite/minddata/dataset/engine/datasetops/source/kmnist_op.cc index 2c63cd8f..21788aa0 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/kmnist_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/kmnist_op.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { KMnistOp::KMnistOp(const std::string &usage, int32_t num_workers, const std::string &folder_path, int32_t queue_size, std::unique_ptr data_schema, std::shared_ptr sampler) @@ -84,4 +84,4 @@ Status KMnistOp::CountTotalRows(const std::string &dir, const std::string &usage return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/kmnist_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/kmnist_op.h index 14ab6cc9..37041a82 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/kmnist_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/kmnist_op.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Forward declares. template @@ -61,5 +61,5 @@ class KMnistOp : public MnistOp { std::string DatasetName(bool upper = false) const override { return upper ? "KMnist" : "kmnist"; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_KMNIST_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/lfw_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/lfw_op.cc index 09a6b0e0..019e83ce 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/lfw_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/lfw_op.cc @@ -25,7 +25,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const char kImageFolder[] = "lfw"; const char kImageFolderFunneled[] = "lfw_funneled"; @@ -328,4 +328,4 @@ Status LFWOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/lfw_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/lfw_op.h index 7e1d10a5..f6a57bfa 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/lfw_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/lfw_op.h @@ -34,7 +34,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class LFWOp /// \brief A source dataset for reading and parsing LFW dataset. 
@@ -131,5 +131,5 @@ class LFWOp : public MappableLeafOp { std::string real_folder_path_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_LFW_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/libri_tts_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/libri_tts_op.cc index b2a6f54e..ac0d7b31 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/libri_tts_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/libri_tts_op.cc @@ -1,234 +1,234 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/libri_tts_op.h" - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/audio/kernels/audio_utils.h" -#include "mindspore-lite/minddata/dataset/core/config_manager.h" -#include "mindspore-lite/minddata/dataset/core/tensor_shape.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" -#include "mindspore-lite/minddata/dataset/engine/execution_tree.h" -#include "utils/file_utils.h" - -namespace mindspore { -namespace dataset { -const int32_t label_file_suffix_len = 10; -const char label_file_suffix[] = ".trans.tsv"; -const char audio_file_suffix[] = ".wav"; -const std::vector usage_list = {"dev-clean", "dev-other", "test-clean", "test-other", - "train-clean-100", "train-clean-360", "train-other-500"}; - -LibriTTSOp::LibriTTSOp(const std::string &dataset_dir, const std::string &usage, int32_t num_workers, - int32_t queue_size, std::unique_ptr data_schema, std::shared_ptr sampler) - : MappableLeafOp(num_workers, queue_size, std::move(sampler)), - dataset_dir_(dataset_dir), - usage_(usage), - data_schema_(std::move(data_schema)) {} - -Status LibriTTSOp::LoadTensorRow(row_id_type row_id, TensorRow *trow) { - RETURN_UNEXPECTED_IF_NULL(trow); - LibriTTSLabelTuple audio_tuple = audio_label_tuples_[row_id]; - const uint32_t rate = 24000; - std::shared_ptr waveform, sample_rate, original_text, normalized_text, speaker_id, chapter_id, utterance_id; - Path dir(real_path_); - std::string file_name = audio_tuple.utterance_id + audio_file_suffix; - Path full_dir = dir / audio_tuple.usage / std::to_string(audio_tuple.speaker_id) / - std::to_string(audio_tuple.chapter_id) / file_name; - RETURN_IF_NOT_OK(ReadAudio(full_dir.ToString(), &waveform)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(rate, &sample_rate)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(audio_tuple.original_text, &original_text)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(audio_tuple.normalized_text, &normalized_text)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(audio_tuple.speaker_id, &speaker_id)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(audio_tuple.chapter_id, &chapter_id)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(audio_tuple.utterance_id, &utterance_id)); - (*trow) = TensorRow( - row_id, 
{std::move(waveform), std::move(sample_rate), std::move(original_text), std::move(normalized_text), - std::move(speaker_id), std::move(chapter_id), std::move(utterance_id)}); - std::string label_path = audio_tuple.label_path; - trow->setPath({full_dir.ToString(), full_dir.ToString(), label_path, label_path, label_path, label_path, label_path}); - return Status::OK(); -} - -void LibriTTSOp::Print(std::ostream &out, bool show_all) const { - if (!show_all) { - ParallelOp::Print(out, show_all); - out << "\n"; - } else { - ParallelOp::Print(out, show_all); - out << "\nNumber of rows: " << num_rows_ << "\nLibriTTS directory: " << dataset_dir_ << "\n\n"; - } -} - -Status LibriTTSOp::CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count) { - RETURN_UNEXPECTED_IF_NULL(count); - *count = 0; - const int64_t num_samples = 0; - const int64_t start_index = 0; - auto sampler = std::make_shared(start_index, num_samples); - auto schema = std::make_unique(); - - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT32), TensorImpl::kCv, 1))); - TensorShape scalar_rate = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("sample_rate", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_rate))); - TensorShape scalar_original_text = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("original_text", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_original_text))); - TensorShape scalar_normalized_text = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("normalized_text", DataType(DataType::DE_STRING), - TensorImpl::kFlexible, 0, &scalar_normalized_text))); - TensorShape scalar_speaker_id = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("speaker_id", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_speaker_id))); - TensorShape scalar_chapter_id = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("chapter_id", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_chapter_id))); - TensorShape scalar_utterance_id = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("utterance_id", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_utterance_id))); - std::shared_ptr cfg = GlobalContext::config_manager(); - int32_t num_workers = cfg->num_parallel_workers(); - int32_t op_connect_size = cfg->op_connector_size(); - auto op = - std::make_shared(dir, usage, num_workers, op_connect_size, std::move(schema), std::move(sampler)); - RETURN_IF_NOT_OK(op->PrepareData()); - *count = op->audio_label_tuples_.size(); - return Status::OK(); -} - -Status LibriTTSOp::ComputeColMap() { - if (column_name_id_map_.empty()) { - for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) { - column_name_id_map_[data_schema_->Column(i).Name()] = i; - } - } else { - MS_LOG(WARNING) << "Column name map is already set!"; - } - return Status::OK(); -} - -Status LibriTTSOp::ReadAudio(const std::string &audio_dir, std::shared_ptr *waveform) { - RETURN_UNEXPECTED_IF_NULL(waveform); - const int32_t kWavFileSampleRate = 24000; - int32_t sample_rate = 0; - std::vector waveform_vec; - RETURN_IF_NOT_OK(ReadWaveFile(audio_dir, &waveform_vec, &sample_rate)); - CHECK_FAIL_RETURN_UNEXPECTED( - sample_rate == kWavFileSampleRate, - "Invalid file, sampling rate of LibriTTS wav file must be 24000, file path: " + audio_dir); - 
RETURN_IF_NOT_OK(Tensor::CreateFromVector(waveform_vec, waveform)); - RETURN_IF_NOT_OK((*waveform)->ExpandDim(0)); - return Status::OK(); -} - -Status LibriTTSOp::PrepareData() { - auto realpath = FileUtils::GetRealPath(dataset_dir_.c_str()); - if (!realpath.has_value()) { - MS_LOG(ERROR) << "Invalid file path, LibriTTS dataset dir: " << dataset_dir_ << " does not exist."; - RETURN_STATUS_UNEXPECTED("Invalid file path, LibriTTS dataset dir: " + dataset_dir_ + " does not exist."); - } - real_path_ = realpath.value(); - Path dir(real_path_); - if (usage_ != "all") { - Path full_dir = dir / usage_; - cur_usage_ = usage_; - RETURN_IF_NOT_OK(GetPaths(&full_dir)); - RETURN_IF_NOT_OK(GetLabels()); - } else { - for (std::string usage_iter : usage_list) { - cur_usage_ = usage_iter; - Path full_dir = dir / cur_usage_; - RETURN_IF_NOT_OK(GetPaths(&full_dir)); - RETURN_IF_NOT_OK(GetLabels()); - } - } - num_rows_ = audio_label_tuples_.size(); - CHECK_FAIL_RETURN_UNEXPECTED(num_rows_ > 0, - "Invalid data, no valid data matching the dataset API LibriTTSDataset. " - "Please check dataset API or file path: " + - dataset_dir_ + "."); - return Status::OK(); -} - -Status LibriTTSOp::GetPaths(Path *dir) { - RETURN_UNEXPECTED_IF_NULL(dir); - auto iter = Path::DirIterator::OpenDirectory(dir); - if (iter == nullptr) { - MS_LOG(WARNING) << "Invalid file path, unable to open directory: " << dir->ToString() << "."; - } else { - while (iter->HasNext()) { - Path sub_dir = iter->Next(); - if (sub_dir.IsDirectory()) { - RETURN_IF_NOT_OK(GetPaths(&sub_dir)); - } else { - Path file_path = sub_dir; - std::string file_name = file_path.Basename(); - int32_t length = file_name.size(); - if (length > label_file_suffix_len && file_name.substr(length - label_file_suffix_len) == label_file_suffix) { - label_files_.push_back(sub_dir.ToString()); - return Status::OK(); - } - } - } - } - return Status::OK(); -} - -Status LibriTTSOp::GetLabels() { - std::string utterance_id_body = ""; - std::string original_text_body = ""; - std::string normalized_text_body = ""; - const uint32_t base = 10; - const uint32_t ascii_zero = 48; - const size_t underline_exact = 3; - for (std::string label_file : label_files_) { - std::ifstream label_reader(label_file, std::ios::in); - while (getline(label_reader, utterance_id_body, '\t')) { - getline(label_reader, original_text_body, '\t'); - getline(label_reader, normalized_text_body, '\n'); - uint32_t speaker_id = 0; - uint32_t chapter_id = 0; - size_t underline_num = 0; - size_t underline_inx[4] = {0}; - for (size_t i = 0; i < utterance_id_body.size() && underline_num <= underline_exact; i++) { - if (utterance_id_body[i] == '_') { - underline_inx[underline_num++] = i; - } - } - if (underline_num != underline_exact) { - label_reader.close(); - RETURN_STATUS_UNEXPECTED("Invalid file, the file may not be a LibriTTS dataset file: " + label_file); - } - for (size_t i = 0; i < underline_inx[0]; i++) { - speaker_id = speaker_id * base + utterance_id_body[i] - ascii_zero; - } - for (size_t i = underline_inx[0] + 1; i < underline_inx[1]; i++) { - chapter_id = chapter_id * base + utterance_id_body[i] - ascii_zero; - } - audio_label_tuples_.push_back( - {cur_usage_, utterance_id_body, original_text_body, normalized_text_body, speaker_id, chapter_id, label_file}); - } - label_reader.close(); - } - label_files_.clear(); - return Status::OK(); -} -} // namespace dataset. -} // namespace mindspore. 
+/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/libri_tts_op.h" + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/audio/kernels/audio_utils.h" +#include "mindspore-lite/minddata/dataset/core/config_manager.h" +#include "mindspore-lite/minddata/dataset/core/tensor_shape.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" +#include "mindspore-lite/minddata/dataset/engine/execution_tree.h" +#include "utils/file_utils.h" + +namespace mindspore::lite { +namespace dataset { +const int32_t label_file_suffix_len = 10; +const char label_file_suffix[] = ".trans.tsv"; +const char audio_file_suffix[] = ".wav"; +const std::vector usage_list = {"dev-clean", "dev-other", "test-clean", "test-other", + "train-clean-100", "train-clean-360", "train-other-500"}; + +LibriTTSOp::LibriTTSOp(const std::string &dataset_dir, const std::string &usage, int32_t num_workers, + int32_t queue_size, std::unique_ptr data_schema, std::shared_ptr sampler) + : MappableLeafOp(num_workers, queue_size, std::move(sampler)), + dataset_dir_(dataset_dir), + usage_(usage), + data_schema_(std::move(data_schema)) {} + +Status LibriTTSOp::LoadTensorRow(row_id_type row_id, TensorRow *trow) { + RETURN_UNEXPECTED_IF_NULL(trow); + LibriTTSLabelTuple audio_tuple = audio_label_tuples_[row_id]; + const uint32_t rate = 24000; + std::shared_ptr waveform, sample_rate, original_text, normalized_text, speaker_id, chapter_id, utterance_id; + Path dir(real_path_); + std::string file_name = audio_tuple.utterance_id + audio_file_suffix; + Path full_dir = dir / audio_tuple.usage / std::to_string(audio_tuple.speaker_id) / + std::to_string(audio_tuple.chapter_id) / file_name; + RETURN_IF_NOT_OK(ReadAudio(full_dir.ToString(), &waveform)); + RETURN_IF_NOT_OK(Tensor::CreateScalar(rate, &sample_rate)); + RETURN_IF_NOT_OK(Tensor::CreateScalar(audio_tuple.original_text, &original_text)); + RETURN_IF_NOT_OK(Tensor::CreateScalar(audio_tuple.normalized_text, &normalized_text)); + RETURN_IF_NOT_OK(Tensor::CreateScalar(audio_tuple.speaker_id, &speaker_id)); + RETURN_IF_NOT_OK(Tensor::CreateScalar(audio_tuple.chapter_id, &chapter_id)); + RETURN_IF_NOT_OK(Tensor::CreateScalar(audio_tuple.utterance_id, &utterance_id)); + (*trow) = TensorRow( + row_id, {std::move(waveform), std::move(sample_rate), std::move(original_text), std::move(normalized_text), + std::move(speaker_id), std::move(chapter_id), std::move(utterance_id)}); + std::string label_path = audio_tuple.label_path; + trow->setPath({full_dir.ToString(), full_dir.ToString(), label_path, label_path, label_path, label_path, label_path}); + return Status::OK(); +} + +void LibriTTSOp::Print(std::ostream &out, bool show_all) const { + if (!show_all) { + ParallelOp::Print(out, show_all); + out << "\n"; + } else { + ParallelOp::Print(out, show_all); + out << "\nNumber of rows: " << num_rows_ << "\nLibriTTS 
directory: " << dataset_dir_ << "\n\n"; + } +} + +Status LibriTTSOp::CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count) { + RETURN_UNEXPECTED_IF_NULL(count); + *count = 0; + const int64_t num_samples = 0; + const int64_t start_index = 0; + auto sampler = std::make_shared(start_index, num_samples); + auto schema = std::make_unique(); + + RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT32), TensorImpl::kCv, 1))); + TensorShape scalar_rate = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("sample_rate", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_rate))); + TensorShape scalar_original_text = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("original_text", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_original_text))); + TensorShape scalar_normalized_text = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("normalized_text", DataType(DataType::DE_STRING), + TensorImpl::kFlexible, 0, &scalar_normalized_text))); + TensorShape scalar_speaker_id = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("speaker_id", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_speaker_id))); + TensorShape scalar_chapter_id = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("chapter_id", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_chapter_id))); + TensorShape scalar_utterance_id = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("utterance_id", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_utterance_id))); + std::shared_ptr cfg = GlobalContext::config_manager(); + int32_t num_workers = cfg->num_parallel_workers(); + int32_t op_connect_size = cfg->op_connector_size(); + auto op = + std::make_shared(dir, usage, num_workers, op_connect_size, std::move(schema), std::move(sampler)); + RETURN_IF_NOT_OK(op->PrepareData()); + *count = op->audio_label_tuples_.size(); + return Status::OK(); +} + +Status LibriTTSOp::ComputeColMap() { + if (column_name_id_map_.empty()) { + for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) { + column_name_id_map_[data_schema_->Column(i).Name()] = i; + } + } else { + MS_LOG(WARNING) << "Column name map is already set!"; + } + return Status::OK(); +} + +Status LibriTTSOp::ReadAudio(const std::string &audio_dir, std::shared_ptr *waveform) { + RETURN_UNEXPECTED_IF_NULL(waveform); + const int32_t kWavFileSampleRate = 24000; + int32_t sample_rate = 0; + std::vector waveform_vec; + RETURN_IF_NOT_OK(ReadWaveFile(audio_dir, &waveform_vec, &sample_rate)); + CHECK_FAIL_RETURN_UNEXPECTED( + sample_rate == kWavFileSampleRate, + "Invalid file, sampling rate of LibriTTS wav file must be 24000, file path: " + audio_dir); + RETURN_IF_NOT_OK(Tensor::CreateFromVector(waveform_vec, waveform)); + RETURN_IF_NOT_OK((*waveform)->ExpandDim(0)); + return Status::OK(); +} + +Status LibriTTSOp::PrepareData() { + auto realpath = FileUtils::GetRealPath(dataset_dir_.c_str()); + if (!realpath.has_value()) { + MS_LOG(ERROR) << "Invalid file path, LibriTTS dataset dir: " << dataset_dir_ << " does not exist."; + RETURN_STATUS_UNEXPECTED("Invalid file path, LibriTTS dataset dir: " + dataset_dir_ + " does not exist."); + } + real_path_ = realpath.value(); + Path dir(real_path_); + if (usage_ != "all") { + Path full_dir = dir / usage_; + cur_usage_ = usage_; + 
+    RETURN_IF_NOT_OK(GetPaths(&full_dir));
+    RETURN_IF_NOT_OK(GetLabels());
+  } else {
+    for (std::string usage_iter : usage_list) {
+      cur_usage_ = usage_iter;
+      Path full_dir = dir / cur_usage_;
+      RETURN_IF_NOT_OK(GetPaths(&full_dir));
+      RETURN_IF_NOT_OK(GetLabels());
+    }
+  }
+  num_rows_ = audio_label_tuples_.size();
+  CHECK_FAIL_RETURN_UNEXPECTED(num_rows_ > 0,
+                               "Invalid data, no valid data matching the dataset API LibriTTSDataset. "
+                               "Please check dataset API or file path: " +
+                                 dataset_dir_ + ".");
+  return Status::OK();
+}
+
+Status LibriTTSOp::GetPaths(Path *dir) {
+  RETURN_UNEXPECTED_IF_NULL(dir);
+  auto iter = Path::DirIterator::OpenDirectory(dir);
+  if (iter == nullptr) {
+    MS_LOG(WARNING) << "Invalid file path, unable to open directory: " << dir->ToString() << ".";
+  } else {
+    while (iter->HasNext()) {
+      Path sub_dir = iter->Next();
+      if (sub_dir.IsDirectory()) {
+        RETURN_IF_NOT_OK(GetPaths(&sub_dir));
+      } else {
+        Path file_path = sub_dir;
+        std::string file_name = file_path.Basename();
+        int32_t length = file_name.size();
+        if (length > label_file_suffix_len && file_name.substr(length - label_file_suffix_len) == label_file_suffix) {
+          label_files_.push_back(sub_dir.ToString());
+          return Status::OK();
+        }
+      }
+    }
+  }
+  return Status::OK();
+}
+
+Status LibriTTSOp::GetLabels() {
+  std::string utterance_id_body = "";
+  std::string original_text_body = "";
+  std::string normalized_text_body = "";
+  const uint32_t base = 10;
+  const uint32_t ascii_zero = 48;
+  const size_t underline_exact = 3;
+  for (std::string label_file : label_files_) {
+    std::ifstream label_reader(label_file, std::ios::in);
+    while (getline(label_reader, utterance_id_body, '\t')) {
+      getline(label_reader, original_text_body, '\t');
+      getline(label_reader, normalized_text_body, '\n');
+      uint32_t speaker_id = 0;
+      uint32_t chapter_id = 0;
+      size_t underline_num = 0;
+      size_t underline_inx[4] = {0};
+      for (size_t i = 0; i < utterance_id_body.size() && underline_num <= underline_exact; i++) {
+        if (utterance_id_body[i] == '_') {
+          underline_inx[underline_num++] = i;
+        }
+      }
+      if (underline_num != underline_exact) {
+        label_reader.close();
+        RETURN_STATUS_UNEXPECTED("Invalid file, the file may not be a LibriTTS dataset file: " + label_file);
+      }
+      for (size_t i = 0; i < underline_inx[0]; i++) {
+        speaker_id = speaker_id * base + utterance_id_body[i] - ascii_zero;
+      }
+      for (size_t i = underline_inx[0] + 1; i < underline_inx[1]; i++) {
+        chapter_id = chapter_id * base + utterance_id_body[i] - ascii_zero;
+      }
+      audio_label_tuples_.push_back(
+        {cur_usage_, utterance_id_body, original_text_body, normalized_text_body, speaker_id, chapter_id, label_file});
+    }
+    label_reader.close();
+  }
+  label_files_.clear();
+  return Status::OK();
+}
+} // namespace dataset
+} // namespace mindspore::lite
diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/libri_tts_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/libri_tts_op.h
index 5fb7d720..1e606f6e 100644
--- a/mindspore-lite/minddata/dataset/engine/datasetops/source/libri_tts_op.h
+++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/libri_tts_op.h
@@ -1,120 +1,120 @@
-/**
- * Copyright 2022 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_LIBRI_TTS_OP_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_LIBRI_TTS_OP_H_ - -#include -#include -#include -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/core/tensor.h" -#include "mindspore-lite/minddata/dataset/engine/data_schema.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -#include "mindspore-lite/minddata/dataset/util/path.h" -#include "mindspore-lite/minddata/dataset/util/queue.h" -#include "mindspore-lite/minddata/dataset/util/status.h" -#include "mindspore-lite/minddata/dataset/util/wait_post.h" - -namespace mindspore { -namespace dataset { -struct LibriTTSLabelTuple { - std::string usage; - std::string utterance_id; - std::string original_text; - std::string normalized_text; - uint32_t speaker_id; - uint32_t chapter_id; - std::string label_path; -}; - -class LibriTTSOp : public MappableLeafOp { - public: - /// \brief Constructor. - /// \param[in] dataset_dir Dir directory of LibriTTS. - /// \param[in] usage usage of this dataset, can be "dev-clean", "dev-other", "test-clean", "test-other", - /// "train-clean-100", "train-clean-360", "train-other-500", or "all". - /// \param[in] num_workers Number of workers reading audios in parallel. - /// \param[in] queue_size Connector queue size. - /// \param[in] data_schema The schema of the LibriTTS dataset. - /// \param[in] sampler Sampler tells LibriSpeechOp what to read. - LibriTTSOp(const std::string &dataset_dir, const std::string &usage, int32_t num_workers, int32_t queue_size, - std::unique_ptr data_schema, std::shared_ptr sampler); - - /// \brief Destructor. - ~LibriTTSOp() = default; - - /// \brief A print method typically used for debugging. - /// \param[out] out Output stream. - /// \param[in] show_all Whether to show all information. - void Print(std::ostream &out, bool show_all) const override; - - /// \brief Function to count the number of samples in the LibriTTS dataset. - /// \param[in] dir Path to the LibriTTS directory. - /// \param[in] usage Select the data set section. - /// \param[out] count Output arg that will hold the minimum of the actual dataset size and numSamples. - /// \return Status The status code returned. - static Status CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count); - - /// \brief Op name getter. - /// \return Name of the current Op. - std::string Name() const override { return "LibriTTSOp"; } - - private: - /// \brief Load a tensor row according to a pair. - /// \param[in] row_id Id for this tensor row. - /// \param[out] row Audio & label read into this tensor row. - /// \return Status The status code returned. - Status LoadTensorRow(row_id_type row_id, TensorRow *row) override; - - /// \brief Read all paths in the directory. - /// \param[in] dir File path to be traversed. - /// \return Status The status code returned. 
- Status GetPaths(Path *dir); - - /// \brief Read all label files. - /// \return Status The status code returned. - Status GetLabels(); - - /// \brief Parse a single wav file. - /// \param[in] audio_dir Audio file path. - /// \param[out] waveform The output waveform tensor. - /// \return Status The status code returned. - Status ReadAudio(const std::string &audio_dir, std::shared_ptr *waveform); - - /// \brief Prepare all data in the directory. - /// \return Status The status code returned. - Status PrepareData(); - - /// \brief Private function for computing the assignment of the column name map. - /// \return Status The status code returned. - Status ComputeColMap() override; - - const std::string usage_; - std::string cur_usage_; - std::string real_path_; - std::string dataset_dir_; - std::unique_ptr data_schema_; - std::vector audio_label_tuples_; - std::vector label_files_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_LIBRI_TTS_OP_H_ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_LIBRI_TTS_OP_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_LIBRI_TTS_OP_H_ + +#include +#include +#include +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/core/tensor.h" +#include "mindspore-lite/minddata/dataset/engine/data_schema.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" +#include "mindspore-lite/minddata/dataset/util/path.h" +#include "mindspore-lite/minddata/dataset/util/queue.h" +#include "mindspore-lite/minddata/dataset/util/status.h" +#include "mindspore-lite/minddata/dataset/util/wait_post.h" + +namespace mindspore::lite { +namespace dataset { +struct LibriTTSLabelTuple { + std::string usage; + std::string utterance_id; + std::string original_text; + std::string normalized_text; + uint32_t speaker_id; + uint32_t chapter_id; + std::string label_path; +}; + +class LibriTTSOp : public MappableLeafOp { + public: + /// \brief Constructor. + /// \param[in] dataset_dir Dir directory of LibriTTS. + /// \param[in] usage usage of this dataset, can be "dev-clean", "dev-other", "test-clean", "test-other", + /// "train-clean-100", "train-clean-360", "train-other-500", or "all". + /// \param[in] num_workers Number of workers reading audios in parallel. + /// \param[in] queue_size Connector queue size. + /// \param[in] data_schema The schema of the LibriTTS dataset. + /// \param[in] sampler Sampler tells LibriSpeechOp what to read. + LibriTTSOp(const std::string &dataset_dir, const std::string &usage, int32_t num_workers, int32_t queue_size, + std::unique_ptr data_schema, std::shared_ptr sampler); + + /// \brief Destructor. 
+ ~LibriTTSOp() = default; + + /// \brief A print method typically used for debugging. + /// \param[out] out Output stream. + /// \param[in] show_all Whether to show all information. + void Print(std::ostream &out, bool show_all) const override; + + /// \brief Function to count the number of samples in the LibriTTS dataset. + /// \param[in] dir Path to the LibriTTS directory. + /// \param[in] usage Select the data set section. + /// \param[out] count Output arg that will hold the minimum of the actual dataset size and numSamples. + /// \return Status The status code returned. + static Status CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count); + + /// \brief Op name getter. + /// \return Name of the current Op. + std::string Name() const override { return "LibriTTSOp"; } + + private: + /// \brief Load a tensor row according to a pair. + /// \param[in] row_id Id for this tensor row. + /// \param[out] row Audio & label read into this tensor row. + /// \return Status The status code returned. + Status LoadTensorRow(row_id_type row_id, TensorRow *row) override; + + /// \brief Read all paths in the directory. + /// \param[in] dir File path to be traversed. + /// \return Status The status code returned. + Status GetPaths(Path *dir); + + /// \brief Read all label files. + /// \return Status The status code returned. + Status GetLabels(); + + /// \brief Parse a single wav file. + /// \param[in] audio_dir Audio file path. + /// \param[out] waveform The output waveform tensor. + /// \return Status The status code returned. + Status ReadAudio(const std::string &audio_dir, std::shared_ptr *waveform); + + /// \brief Prepare all data in the directory. + /// \return Status The status code returned. + Status PrepareData(); + + /// \brief Private function for computing the assignment of the column name map. + /// \return Status The status code returned. 
+ Status ComputeColMap() override; + + const std::string usage_; + std::string cur_usage_; + std::string real_path_; + std::string dataset_dir_; + std::unique_ptr data_schema_; + std::vector audio_label_tuples_; + std::vector label_files_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_LIBRI_TTS_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/lj_speech_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/lj_speech_op.cc index a6457fb4..b9f8fec9 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/lj_speech_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/lj_speech_op.cc @@ -26,7 +26,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { LJSpeechOp::LJSpeechOp(const std::string &file_dir, int32_t num_workers, int32_t queue_size, std::unique_ptr data_schema, std::shared_ptr sampler) @@ -151,4 +151,4 @@ Status LJSpeechOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/lj_speech_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/lj_speech_op.h index 292e545f..30be2b48 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/lj_speech_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/lj_speech_op.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Read LJSpeech dataset. class LJSpeechOp : public MappableLeafOp { @@ -82,5 +82,5 @@ class LJSpeechOp : public MappableLeafOp { std::vector> meta_info_list_; // the shape is (N, 3) }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_LJ_SPEECH_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/lsun_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/lsun_op.cc index 00c6e774..1e867afe 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/lsun_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/lsun_op.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { LSUNOp::LSUNOp(int32_t num_wkrs, const std::string &file_dir, int32_t queue_size, const std::string &usage, const std::vector &classes, bool do_decode, std::unique_ptr data_schema, @@ -166,4 +166,4 @@ Status LSUNOp::GetNumClasses(int64_t *num_classes) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/lsun_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/lsun_op.h index 7f9cbac0..abff12a2 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/lsun_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/lsun_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/image_folder_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Forward declares. 
template @@ -110,5 +110,5 @@ class LSUNOp : public ImageFolderOp { std::vector classes_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_LSUN_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/manifest_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/manifest_op.cc index 704dfecf..287820e7 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/manifest_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/manifest_op.cc @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { ManifestOp::ManifestOp(int32_t num_works, std::string file, int32_t queue_size, bool decode, const std::map &class_index, std::unique_ptr data_schema, @@ -312,4 +312,4 @@ Status ManifestOp::GetClassIndexing(std::vector>> image_labelname_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_MANIFEST_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.cc index 5865fce7..970dd34f 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { MappableLeafOp::MappableLeafOp(int32_t num_wkrs, int32_t queue_size, std::shared_ptr sampler) : ParallelOp(num_wkrs, queue_size, std::move(sampler)), @@ -249,4 +249,4 @@ Status MappableLeafOp::ResetAndUpdateRepeat() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h index 85dfd11e..6f295485 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h @@ -39,7 +39,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declares template @@ -136,5 +136,5 @@ class MappableLeafOp : public ParallelOp, TensorRow>, p ImplementedPullMode PullModeImplementationStatus() const override { return ImplementedPullMode::Implemented; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_MAPPABLE_LEAF_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/mindrecord_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/mindrecord_op.cc index 78070df7..687b946c 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/mindrecord_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/mindrecord_op.cc @@ -28,7 +28,7 @@ #include 
"mindspore-lite/minddata/dataset/include/dataset/constants.h" #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { using mindrecord::kInt64Len; @@ -470,4 +470,4 @@ TensorRow MindRecordOp::operator[](size_t index) { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/mindrecord_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/mindrecord_op.h index 498e4b76..55cda305 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/mindrecord_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/mindrecord_op.h @@ -37,7 +37,7 @@ #include "minddata/mindrecord/include/common/shard_utils.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declares template @@ -193,5 +193,5 @@ class MindRecordOp : public MappableLeafOp { ShuffleMode shuffle_mode_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_MINDRECORD_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.cc index 8a1c1165..cbdba387 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const int32_t kMnistImageFileMagicNumber = 2051; const int32_t kMnistLabelFileMagicNumber = 2049; @@ -340,4 +340,4 @@ Status MnistOp::ComputeColMap() { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.h index 25c77e26..cc933673 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.h @@ -34,7 +34,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declares template @@ -142,5 +142,5 @@ class MnistOp : public MappableLeafOp { std::vector label_path_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_MNIST_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/multi30k_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/multi30k_op.cc index 91d3b876..ed04b0a9 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/multi30k_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/multi30k_op.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // constructor of Multi30k. 
Multi30kOp::Multi30kOp(int32_t num_workers, int64_t num_samples, const std::vector &language_pair, @@ -147,4 +147,4 @@ Status Multi30kOp::LoadFile(const std::string &file_en, int64_t start_offset, in return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/multi30k_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/multi30k_op.h index a3026dea..fb6bd15d 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/multi30k_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/multi30k_op.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class JaggedConnector; using StringIndex = AutoIndexObj; @@ -81,5 +81,5 @@ class Multi30kOp : public TextFileOp { std::vector language_pair_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_MULTI30K_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.cc index a58e4ca0..10bb1e67 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.cc @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/util/task_manager.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { NonMappableLeafOp::NonMappableLeafOp(int32_t num_workers, int32_t worker_connector_size, int64_t total_num_rows, int32_t op_connector_size, bool shuffle_files, int32_t num_devices, @@ -417,4 +417,4 @@ Status NonMappableLeafOp::ResetAndUpdateRepeat() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.h index 5044b4cd..3c18c784 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { template class Queue; @@ -233,5 +233,5 @@ class NonMappableLeafOp : public ParallelOp { uint32_t seed_; // used to shuffle filename indices }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_NONMAPPABLE_LEAF_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/omniglot_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/omniglot_op.cc index 0862faac..0cf80d07 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/omniglot_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/omniglot_op.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { OmniglotOp::OmniglotOp(int32_t 
num_wkrs, const std::string &file_dir, int32_t queue_size, bool background, bool do_decode, std::unique_ptr data_schema, @@ -127,4 +127,4 @@ Status OmniglotOp::GetNumClasses(int64_t *num_classes) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/omniglot_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/omniglot_op.h index 8d48efe6..170714d4 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/omniglot_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/omniglot_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/image_folder_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declares. template @@ -94,5 +94,5 @@ class OmniglotOp : public ImageFolderOp { Status RecursiveWalkFolder(Path *dir) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_OMNIGLOT_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/penn_treebank_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/penn_treebank_op.cc index d0608689..c8923adc 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/penn_treebank_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/penn_treebank_op.cc @@ -1,55 +1,55 @@ -/** - * Copyright 2021-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/penn_treebank_op.h" - -#include "include/common/debug/common.h" -#include "mindspore-lite/minddata/dataset/core/config_manager.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.h" -#include "mindspore-lite/minddata/dataset/engine/execution_tree.h" -#include "mindspore-lite/minddata/dataset/util/random.h" -#include "mindspore-lite/minddata/dataset/util/wait_post.h" - -namespace mindspore { -namespace dataset { -PennTreebankOp::PennTreebankOp(int32_t num_workers, int64_t total_rows, int32_t worker_connector_size, - std::unique_ptr schema, const std::vector &file_list, - int32_t op_connector_size, bool shuffle_files, int32_t num_devices, int32_t device_id) - : TextFileOp(num_workers, total_rows, worker_connector_size, std::move(schema), file_list, op_connector_size, - shuffle_files, num_devices, device_id) {} - -// A print method typically used for debugging. -void PennTreebankOp::Print(std::ostream &out, bool show_all) const { - if (!show_all) { - // Call the super class for displaying any common 1-liner info. - ParallelOp::Print(out, show_all); - // Then show any custom derived-internal 1-liner info for this op. - out << "\n"; - } else { - // Call the super class for displaying any common detailed info. - ParallelOp::Print(out, show_all); - // Then show any custom derived-internal stuff. 
- out << "\nRow count: " << total_rows_ << "\nDevice id: " << device_id_ << "\nNumber of devices: " << num_devices_ - << "\nShuffle files: " << ((shuffle_files_) ? "yes" : "no") << "\nPennTreebank files list:\n"; - for (size_t i = 0; i < text_files_list_.size(); ++i) { - out << " " << text_files_list_[i]; - } - out << "\nData Schema:\n"; - out << *data_schema_ << "\n\n"; - } -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/penn_treebank_op.h" + +#include "include/common/debug/common.h" +#include "mindspore-lite/minddata/dataset/core/config_manager.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.h" +#include "mindspore-lite/minddata/dataset/engine/execution_tree.h" +#include "mindspore-lite/minddata/dataset/util/random.h" +#include "mindspore-lite/minddata/dataset/util/wait_post.h" + +namespace mindspore::lite { +namespace dataset { +PennTreebankOp::PennTreebankOp(int32_t num_workers, int64_t total_rows, int32_t worker_connector_size, + std::unique_ptr schema, const std::vector &file_list, + int32_t op_connector_size, bool shuffle_files, int32_t num_devices, int32_t device_id) + : TextFileOp(num_workers, total_rows, worker_connector_size, std::move(schema), file_list, op_connector_size, + shuffle_files, num_devices, device_id) {} + +// A print method typically used for debugging. +void PennTreebankOp::Print(std::ostream &out, bool show_all) const { + if (!show_all) { + // Call the super class for displaying any common 1-liner info. + ParallelOp::Print(out, show_all); + // Then show any custom derived-internal 1-liner info for this op. + out << "\n"; + } else { + // Call the super class for displaying any common detailed info. + ParallelOp::Print(out, show_all); + // Then show any custom derived-internal stuff. + out << "\nRow count: " << total_rows_ << "\nDevice id: " << device_id_ << "\nNumber of devices: " << num_devices_ + << "\nShuffle files: " << ((shuffle_files_) ? "yes" : "no") << "\nPennTreebank files list:\n"; + for (size_t i = 0; i < text_files_list_.size(); ++i) { + out << " " << text_files_list_[i]; + } + out << "\nData Schema:\n"; + out << *data_schema_ << "\n\n"; + } +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/penn_treebank_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/penn_treebank_op.h index eb46377d..a46d40cc 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/penn_treebank_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/penn_treebank_op.h @@ -1,69 +1,69 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_PENN_TREEBANK_OP_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_PENN_TREEBANK_OP_H_ - -#include -#include -#include -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.h" -#include "mindspore-lite/minddata/dataset/util/queue.h" - -namespace mindspore { -namespace dataset { -class JaggedConnector; - -class PennTreebankOp : public TextFileOp { - public: - /// \brief Constructor. - /// \param[in] num_workers Number of workers reading images in parallel - /// \param[in] num_samples The number of samples to be included in the dataset. - /// \param[in] worker_connector_size Size of each internal queue. - /// \param[in] data_schema Path to dataset schema file. - /// \param[in] file_list List of files to be read to search for a pattern of files. The list - /// will be sorted in a lexicographical order. - /// \param[in] op_connector_size Size of each queue in the connector that the child operator pulls from. - /// \param[in] shuffle_files Whether or not to shuffle the files before reading data. - /// \param[in] num_devices Number of devices that the dataset should be divided into. - /// \param[in] device_id The device ID within num_devices. This argument should be - /// specified only when num_devices is also specified. - PennTreebankOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, std::unique_ptr, - const std::vector &file_list, int32_t op_connector_size, bool shuffle_files, - int32_t num_devices, int32_t device_id); - - /// \brief Default destructor. - ~PennTreebankOp() = default; - - /// \brief A print method typically used for debugging. - /// \param[in] out he output stream to write output to. - /// \param[in] show_all A bool to control if you want to show all info or just a summary. - void Print(std::ostream &out, bool show_all) const override; - - /// \brief Op name getter. - /// \return Name of the current Op. - std::string Name() const override { return "PennTreebankOp"; } - - /// \brief DatasetName name getter. - /// \return DatasetName of the current Op. - std::string DatasetName(bool upper = false) const { return upper ? "PennTreebank" : "penn treebank"; } -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_PENN_TREEBANK_OP_H_ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_PENN_TREEBANK_OP_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_PENN_TREEBANK_OP_H_ + +#include +#include +#include +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.h" +#include "mindspore-lite/minddata/dataset/util/queue.h" + +namespace mindspore::lite { +namespace dataset { +class JaggedConnector; + +class PennTreebankOp : public TextFileOp { + public: + /// \brief Constructor. + /// \param[in] num_workers Number of workers reading images in parallel + /// \param[in] num_samples The number of samples to be included in the dataset. + /// \param[in] worker_connector_size Size of each internal queue. + /// \param[in] data_schema Path to dataset schema file. + /// \param[in] file_list List of files to be read to search for a pattern of files. The list + /// will be sorted in a lexicographical order. + /// \param[in] op_connector_size Size of each queue in the connector that the child operator pulls from. + /// \param[in] shuffle_files Whether or not to shuffle the files before reading data. + /// \param[in] num_devices Number of devices that the dataset should be divided into. + /// \param[in] device_id The device ID within num_devices. This argument should be + /// specified only when num_devices is also specified. + PennTreebankOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, std::unique_ptr, + const std::vector &file_list, int32_t op_connector_size, bool shuffle_files, + int32_t num_devices, int32_t device_id); + + /// \brief Default destructor. + ~PennTreebankOp() = default; + + /// \brief A print method typically used for debugging. + /// \param[in] out he output stream to write output to. + /// \param[in] show_all A bool to control if you want to show all info or just a summary. + void Print(std::ostream &out, bool show_all) const override; + + /// \brief Op name getter. + /// \return Name of the current Op. + std::string Name() const override { return "PennTreebankOp"; } + + /// \brief DatasetName name getter. + /// \return DatasetName of the current Op. + std::string DatasetName(bool upper = false) const { return upper ? 
"PennTreebank" : "penn treebank"; } +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_PENN_TREEBANK_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/photo_tour_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/photo_tour_op.cc index f0de4d88..bc2a51b8 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/photo_tour_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/photo_tour_op.cc @@ -28,7 +28,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr uint32_t kPatchNumPerRow = 16; constexpr uint32_t kPatchNumPerCol = 16; @@ -418,4 +418,4 @@ Status PhotoTourOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/photo_tour_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/photo_tour_op.h index f043a6c4..b834226d 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/photo_tour_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/photo_tour_op.h @@ -37,7 +37,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declares template @@ -152,5 +152,5 @@ class PhotoTourOp : public MappableLeafOp { std::mutex access_mutex_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_PHOTO_TOUR_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/places365_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/places365_op.cc index 332ad2eb..451cc085 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/places365_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/places365_op.cc @@ -29,7 +29,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr char kCategoriesMeta[] = "categories_places365.txt"; const std::map kFileListMeta = {{"train-standard", "places365_train_standard.txt"}, @@ -309,4 +309,4 @@ Status Places365Op::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/places365_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/places365_op.h index 7dcc0b06..71c83d17 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/places365_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/places365_op.h @@ -33,7 +33,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declares template @@ -133,5 +133,5 @@ class Places365Op : public MappableLeafOp { std::vector> image_path_label_pairs_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_PLACES365_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/qmnist_op.cc 
b/mindspore-lite/minddata/dataset/engine/datasetops/source/qmnist_op.cc index 69206ab2..d6f91390 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/qmnist_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/qmnist_op.cc @@ -29,7 +29,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const int32_t kQMnistLabelFileMagicNumber = 3074; const size_t kQMnistImageRows = 28; @@ -329,4 +329,4 @@ Status QMnistOp::CheckLabel(const std::string &file_name, std::ifstream *label_r return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/qmnist_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/qmnist_op.h index 57a97dc2..75975a17 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/qmnist_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/qmnist_op.h @@ -1,113 +1,113 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_QMNIST_OP_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_QMNIST_OP_H_ - -#include -#include -#include -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/core/tensor.h" -#include "mindspore-lite/minddata/dataset/engine/data_schema.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -#include "mindspore-lite/minddata/dataset/util/path.h" -#include "mindspore-lite/minddata/dataset/util/queue.h" -#include "mindspore-lite/minddata/dataset/util/status.h" -#include "mindspore-lite/minddata/dataset/util/wait_post.h" - -namespace mindspore { -namespace dataset { - -using QMnistImageInfoPair = std::pair, std::shared_ptr>; - -class QMnistOp : public MnistOp { - public: - // Constructor. - // @param const std::string &folder_path - dir directory of QMNIST data file. - // @param const std::string &usage - Usage of this dataset, can be 'train', 'test', 'test10k', 'test50k', 'nist' or - // 'all'. - // @param bool compat - Compatibility with Mnist. - // @param std::unique_ptr data_schema - the schema of the QMNIST dataset. - // @param td::unique_ptr sampler - sampler tells QMnistOp what to read. - // @param int32_t num_workers - number of workers reading images in parallel. - // @param int32_t queue_size - connector queue size. - QMnistOp(const std::string &folder_path, const std::string &usage, bool compat, - std::unique_ptr data_schema, std::shared_ptr sampler, int32_t num_workers, - int32_t queue_size); - - // Destructor. 
- ~QMnistOp() = default; - - // Op name getter. - // @return std::string - Name of the current Op. - std::string Name() const override { return "QMnistOp"; } - - // DatasetName name getter - // \return std::string - DatasetName of the current Op - std::string DatasetName(bool upper = false) const { return upper ? "QMnist" : "qmnist"; } - - // A print method typically used for debugging. - // @param std::ostream &out - out stream. - // @param bool show_all - whether to show all information. - void Print(std::ostream &out, bool show_all) const override; - - // Function to count the number of samples in the QMNIST dataset. - // @param const std::string &dir - path to the QMNIST directory. - // @param const std::string &usage - Usage of this dataset, can be 'train', 'test', 'test10k', 'test50k', 'nist' or - // 'all'. - // @param int64_t *count - output arg that will hold the actual dataset size. - // @return Status -The status code returned. - static Status CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count); - - private: - // Load a tensor row according to a pair. - // @param row_id_type row_id - id for this tensor row. - // @param TensorRow row - image & label read into this tensor row. - // @return Status - The status code returned. - Status LoadTensorRow(row_id_type row_id, TensorRow *trow) override; - - // Get needed files in the folder_path_. - // @return Status - The status code returned. - Status WalkAllFiles() override; - - // Read images and labels from the file stream. - // @param std::ifstream *image_reader - image file stream. - // @param std::ifstream *label_reader - label file stream. - // @param size_t index - the index of file that is reading. - // @return Status The status code returned. - Status ReadImageAndLabel(std::ifstream *image_reader, std::ifstream *label_reader, size_t index) override; - - // Check label stream. - // @param const std::string &file_name - label file name. - // @param std::ifstream *label_reader - label file stream. - // @param uint32_t num_labels - returns the number of labels. - // @return Status The status code returned. - Status CheckLabel(const std::string &file_name, std::ifstream *label_reader, uint32_t *num_labels) override; - - const bool compat_; // compatible with mnist - - std::vector image_info_pairs_; -}; -} // namespace dataset -} // namespace mindspore - -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_QMNIST_OP_H_ +/** + * Copyright 2021-2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_QMNIST_OP_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_QMNIST_OP_H_ + +#include +#include +#include +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/core/tensor.h" +#include "mindspore-lite/minddata/dataset/engine/data_schema.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/mnist_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" +#include "mindspore-lite/minddata/dataset/util/path.h" +#include "mindspore-lite/minddata/dataset/util/queue.h" +#include "mindspore-lite/minddata/dataset/util/status.h" +#include "mindspore-lite/minddata/dataset/util/wait_post.h" + +namespace mindspore::lite { +namespace dataset { + +using QMnistImageInfoPair = std::pair, std::shared_ptr>; + +class QMnistOp : public MnistOp { + public: + // Constructor. + // @param const std::string &folder_path - dir directory of QMNIST data file. + // @param const std::string &usage - Usage of this dataset, can be 'train', 'test', 'test10k', 'test50k', 'nist' or + // 'all'. + // @param bool compat - Compatibility with Mnist. + // @param std::unique_ptr data_schema - the schema of the QMNIST dataset. + // @param td::unique_ptr sampler - sampler tells QMnistOp what to read. + // @param int32_t num_workers - number of workers reading images in parallel. + // @param int32_t queue_size - connector queue size. + QMnistOp(const std::string &folder_path, const std::string &usage, bool compat, + std::unique_ptr data_schema, std::shared_ptr sampler, int32_t num_workers, + int32_t queue_size); + + // Destructor. + ~QMnistOp() = default; + + // Op name getter. + // @return std::string - Name of the current Op. + std::string Name() const override { return "QMnistOp"; } + + // DatasetName name getter + // \return std::string - DatasetName of the current Op + std::string DatasetName(bool upper = false) const { return upper ? "QMnist" : "qmnist"; } + + // A print method typically used for debugging. + // @param std::ostream &out - out stream. + // @param bool show_all - whether to show all information. + void Print(std::ostream &out, bool show_all) const override; + + // Function to count the number of samples in the QMNIST dataset. + // @param const std::string &dir - path to the QMNIST directory. + // @param const std::string &usage - Usage of this dataset, can be 'train', 'test', 'test10k', 'test50k', 'nist' or + // 'all'. + // @param int64_t *count - output arg that will hold the actual dataset size. + // @return Status -The status code returned. + static Status CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count); + + private: + // Load a tensor row according to a pair. + // @param row_id_type row_id - id for this tensor row. + // @param TensorRow row - image & label read into this tensor row. + // @return Status - The status code returned. + Status LoadTensorRow(row_id_type row_id, TensorRow *trow) override; + + // Get needed files in the folder_path_. + // @return Status - The status code returned. + Status WalkAllFiles() override; + + // Read images and labels from the file stream. + // @param std::ifstream *image_reader - image file stream. + // @param std::ifstream *label_reader - label file stream. + // @param size_t index - the index of file that is reading. 
+ // @return Status The status code returned. + Status ReadImageAndLabel(std::ifstream *image_reader, std::ifstream *label_reader, size_t index) override; + + // Check label stream. + // @param const std::string &file_name - label file name. + // @param std::ifstream *label_reader - label file stream. + // @param uint32_t num_labels - returns the number of labels. + // @return Status The status code returned. + Status CheckLabel(const std::string &file_name, std::ifstream *label_reader, uint32_t *num_labels) override; + + const bool compat_; // compatible with mnist + + std::vector image_info_pairs_; +}; +} // namespace dataset +} // namespace mindspore::lite + +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_QMNIST_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/random_data_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/random_data_op.cc index 98f608ea..6e73750a 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/random_data_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/random_data_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/wait_post.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for RandomDataOp RandomDataOp::RandomDataOp(int32_t num_workers, int32_t op_connector_size, int64_t total_rows, @@ -185,4 +185,4 @@ Status RandomDataOp::PrepareData() { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/random_data_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/random_data_op.h index f7a075ee..557f3ac8 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/random_data_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/random_data_op.h @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // The RandomDataOp is a leaf node storage operator that generates random data based // on the schema specifications. 
Typically, it's used for testing and demonstrating @@ -125,6 +125,6 @@ class RandomDataOp : public MappableLeafOp { std::vector rows_; }; // class RandomDataOp } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_RANDOM_DATA_OP_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/rendered_sst2_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/rendered_sst2_op.cc index 359a3e92..3a0cfd00 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/rendered_sst2_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/rendered_sst2_op.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { RenderedSST2Op::RenderedSST2Op(int32_t num_wkrs, const std::string &file_dir, const std::string &usage, int32_t queue_size, bool do_decode, const std::set &exts, @@ -389,4 +389,4 @@ Status RenderedSST2Op::InitPullMode() { return PrepareData(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/rendered_sst2_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/rendered_sst2_op.h index b9b3d1a0..87bb1cc7 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/rendered_sst2_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/rendered_sst2_op.h @@ -42,7 +42,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// Forward declares template @@ -172,5 +172,5 @@ class RenderedSST2Op : public MappableLeafOp { std::mutex access_mutex_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_RENDERED_SST2_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/distributed_sampler.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/distributed_sampler.cc index f9d72ba8..d0f58659 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/distributed_sampler.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/distributed_sampler.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { DistributedSamplerRT::DistributedSamplerRT(int64_t num_shards, int64_t shard_id, bool shuffle, int64_t num_samples, uint32_t seed, int64_t offset, bool even_dist) @@ -228,4 +228,4 @@ Status DistributedSamplerRT::to_json(nlohmann::json *out_json) { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/distributed_sampler.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/distributed_sampler.h index ca7982af..c60eeb4d 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/distributed_sampler.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/distributed_sampler.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class 
DistributedSamplerRT : public SamplerRT { public: @@ -91,6 +91,6 @@ class DistributedSamplerRT : public SamplerRT { bool non_empty_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SAMPLER_DISTRIBUTED_SAMPLER_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/pk_sampler.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/pk_sampler.cc index 9caae5bb..021fd485 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/pk_sampler.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/pk_sampler.cc @@ -18,7 +18,7 @@ #include #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { PKSamplerRT::PKSamplerRT(int64_t num_val, bool shuffle, int64_t num_samples, int64_t samples_per_tensor) : SamplerRT(num_samples, samples_per_tensor), @@ -151,4 +151,4 @@ Status PKSamplerRT::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/pk_sampler.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/pk_sampler.h index d9e41281..1cdb8c6f 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/pk_sampler.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/pk_sampler.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class PKSamplerRT : public SamplerRT { // NOT YET FINISHED public: @@ -83,6 +83,6 @@ class PKSamplerRT : public SamplerRT { // NOT YET FINISHED std::map> label_to_ids_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SAMPLER_PK_SAMPLER_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/random_sampler.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/random_sampler.cc index 543f823d..2892812d 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/random_sampler.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/random_sampler.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { RandomSamplerRT::RandomSamplerRT(bool replacement, int64_t num_samples, bool reshuffle_each_epoch, int64_t samples_per_tensor) @@ -144,4 +144,4 @@ Status RandomSamplerRT::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/random_sampler.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/random_sampler.h index 0d05def3..edf155c2 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/random_sampler.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/random_sampler.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomSamplerRT : public SamplerRT { public: @@ -68,5 +68,5 @@ class 
RandomSamplerRT : public SamplerRT { bool reshuffle_each_epoch_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SAMPLER_RANDOM_SAMPLER_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.cc index d814e873..8a2161fa 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.cc @@ -18,7 +18,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomAccessOp::GetNumRowsInDataset(int64_t *num) const { RETURN_UNEXPECTED_IF_NULL(num); @@ -218,4 +218,4 @@ Status SamplerRT::to_json(nlohmann::json *out_json) { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h index 1e8a0c4f..01a99e87 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/engine/data_schema.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // RandomAccessOp is a base class that all data-producing leaf operators // must inherit from if those leaf operator wish to support sampling. @@ -190,5 +190,5 @@ class SamplerRT { TensorRow child_ids_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SAMPLER_SAMPLER_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.cc index 7195d557..2411d226 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.cc @@ -20,7 +20,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { SequentialSamplerRT::SequentialSamplerRT(int64_t start_index, int64_t num_samples, int64_t samples_per_tensor) : SamplerRT(num_samples, samples_per_tensor), @@ -152,4 +152,4 @@ Status SequentialSamplerRT::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h index 147e9716..a0697a13 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SequentialSamplerRT : public SamplerRT { public: @@ -73,6 +73,6 @@ class SequentialSamplerRT : public SamplerRT { int64_t index_produced_; // An internal counter that tracks how 
many ids have been produced }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SAMPLER_SEQUENTIAL_SAMPLER_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/skip_first_epoch_sampler.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/skip_first_epoch_sampler.cc index e54fc11d..9173a475 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/skip_first_epoch_sampler.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/skip_first_epoch_sampler.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/distributed_sampler.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { SkipFirstEpochSamplerRT::SkipFirstEpochSamplerRT(int64_t start_index, int64_t num_samples, int64_t samples_per_tensor) : SequentialSamplerRT(start_index, num_samples, samples_per_tensor), sample_need_to_skip_(start_index) {} @@ -191,4 +191,4 @@ Status SkipFirstEpochSamplerRT::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/skip_first_epoch_sampler.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/skip_first_epoch_sampler.h index a6384832..dd65d40b 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/skip_first_epoch_sampler.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/skip_first_epoch_sampler.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SkipFirstEpochSamplerRT : public SequentialSamplerRT { public: @@ -64,6 +64,6 @@ class SkipFirstEpochSamplerRT : public SequentialSamplerRT { bool first_epoch_done_ = false; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SAMPLER_SKIP_FIRST_EPOCH_SAMPLER_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc index fb69cd44..d156e3fc 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_random_sampler.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/core/global_context.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor. 
SubsetRandomSamplerRT::SubsetRandomSamplerRT(const std::vector &indices, int64_t num_samples, @@ -71,4 +71,4 @@ Status SubsetRandomSamplerRT::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_random_sampler.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_random_sampler.h index 5b295afc..24800fd4 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_random_sampler.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_random_sampler.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_sampler.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// Randomly samples elements from a given list of indices, without replacement. class SubsetRandomSamplerRT : public SubsetSamplerRT { @@ -63,6 +63,6 @@ class SubsetRandomSamplerRT : public SubsetSamplerRT { std::mt19937 rand_gen_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SAMPLER_SUBSET_RANDOM_SAMPLER_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_sampler.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_sampler.cc index 3b696852..51b80dd9 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_sampler.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_sampler.cc @@ -19,7 +19,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor. SubsetSamplerRT::SubsetSamplerRT(const std::vector &indices, int64_t num_samples, int64_t samples_per_tensor) @@ -140,4 +140,4 @@ int64_t SubsetSamplerRT::CalculateNumSamples(int64_t num_rows) { return res; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_sampler.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_sampler.h index 1e73e34b..a276f890 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_sampler.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/subset_sampler.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// Samples elements from a given list of indices. 
class SubsetSamplerRT : public SamplerRT { @@ -77,6 +77,6 @@ class SubsetSamplerRT : public SamplerRT { int64_t sample_id_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SAMPLER_SUBSET_SAMPLER_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc index 455929c2..328711cb 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/core/global_context.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor. WeightedRandomSamplerRT::WeightedRandomSamplerRT(const std::vector &weights, int64_t num_samples, @@ -195,4 +195,4 @@ Status WeightedRandomSamplerRT::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.h index 406e1ca5..f46ffe10 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/weighted_random_sampler.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Samples elements from id `0, 1, ..., weights.size()-1` with given probabilities (weights). 
class WeightedRandomSamplerRT : public SamplerRT { @@ -93,6 +93,6 @@ class WeightedRandomSamplerRT : public SamplerRT { std::deque onepass_ids_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sbu_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sbu_op.cc index eec71b2b..e164b8c7 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sbu_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sbu_op.cc @@ -28,7 +28,7 @@ #include "utils/ms_utils.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { SBUOp::SBUOp(const std::string &folder_path, bool decode, std::unique_ptr data_schema, std::shared_ptr sampler, int32_t num_workers, int32_t queue_size) @@ -220,4 +220,4 @@ Status SBUOp::ReplaceAll(std::string *str, const std::string &from, const std::s return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sbu_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sbu_op.h index 6447f2de..88c3924b 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sbu_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sbu_op.h @@ -33,7 +33,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { using SBUImageCaptionPair = std::pair; @@ -118,5 +118,5 @@ class SBUOp : public MappableLeafOp { std::vector image_caption_pairs_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SBU_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/semeion_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/semeion_op.cc index dc3dfff2..19763778 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/semeion_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/semeion_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr uint32_t kSemeionImageSize = 256; constexpr uint32_t kSemeionLabelSize = 10; @@ -175,4 +175,4 @@ Status SemeionOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/semeion_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/semeion_op.h index 1d44fd0f..4b541df7 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/semeion_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/semeion_op.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/util/path.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SemeionOp : public MappableLeafOp { public: @@ -88,5 +88,5 @@ class SemeionOp : public MappableLeafOp { std::vector semeionline_rows_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SEMEION_OP_H_ diff --git 
a/mindspore-lite/minddata/dataset/engine/datasetops/source/sogou_news_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sogou_news_op.cc index 36975bd7..b3d7388c 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sogou_news_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sogou_news_op.cc @@ -1,52 +1,52 @@ -/** - * Copyright 2021-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sogou_news_op.h" - -#include - -#include "include/common/debug/common.h" - -namespace mindspore { -namespace dataset { -SogouNewsOp::SogouNewsOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, - int32_t op_connector_size, bool shuffle_files, int32_t num_devices, int32_t device_id, - char field_delim, const std::vector> &column_default, - const std::vector &column_name, - const std::vector &sogou_news_files_list) - : CsvOp(sogou_news_files_list, field_delim, column_default, column_name, num_workers, num_samples, - worker_connector_size, op_connector_size, shuffle_files, num_devices, device_id) {} - -void SogouNewsOp::Print(std::ostream &out, bool show_all) const { - if (!show_all) { - // Call the super class for displaying any common 1-liner info. - ParallelOp::Print(out, show_all); - // Then show any custom derived-internal 1-liner info for this op. - out << "\n"; - } else { - // Call the super class for displaying any common detailed info. - ParallelOp::Print(out, show_all); - // Then show any custom derived-internal stuff. - out << "\nSample count: " << total_rows_ << "\nDevice id: " << device_id_ << "\nNumber of devices: " << num_devices_ - << "\nShuffle files: " << ((shuffle_files_) ? "yes" : "no") << "\nSogouNews files list:\n"; - for (int i = 0; i < csv_files_list_.size(); ++i) { - out << " " << csv_files_list_[i]; - } - out << "\n\n"; - } -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sogou_news_op.h" + +#include + +#include "include/common/debug/common.h" + +namespace mindspore::lite { +namespace dataset { +SogouNewsOp::SogouNewsOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, + int32_t op_connector_size, bool shuffle_files, int32_t num_devices, int32_t device_id, + char field_delim, const std::vector> &column_default, + const std::vector &column_name, + const std::vector &sogou_news_files_list) + : CsvOp(sogou_news_files_list, field_delim, column_default, column_name, num_workers, num_samples, + worker_connector_size, op_connector_size, shuffle_files, num_devices, device_id) {} + +void SogouNewsOp::Print(std::ostream &out, bool show_all) const { + if (!show_all) { + // Call the super class for displaying any common 1-liner info. + ParallelOp::Print(out, show_all); + // Then show any custom derived-internal 1-liner info for this op. + out << "\n"; + } else { + // Call the super class for displaying any common detailed info. + ParallelOp::Print(out, show_all); + // Then show any custom derived-internal stuff. + out << "\nSample count: " << total_rows_ << "\nDevice id: " << device_id_ << "\nNumber of devices: " << num_devices_ + << "\nShuffle files: " << ((shuffle_files_) ? "yes" : "no") << "\nSogouNews files list:\n"; + for (int i = 0; i < csv_files_list_.size(); ++i) { + out << " " << csv_files_list_[i]; + } + out << "\n\n"; + } +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sogou_news_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sogou_news_op.h index 928faad1..66fdae94 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sogou_news_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sogou_news_op.h @@ -1,71 +1,71 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SOGOU_NEWS_OP_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SOGOU_NEWS_OP_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" - -namespace mindspore { -namespace dataset { -class JaggedConnector; - -/// \class SogouNewsOp -/// \brief A Op derived class to represent SogouNews Op. -class SogouNewsOp : public CsvOp { - public: - /// \brief Constructor of SogouNewsOp. - /// \param[in] num_workers Number of worker threads reading data from sogou_news files. - /// \param[in] num_samples The number of samples to be included in the dataset. - /// \param[in] worker_connector_size Size of each internal queue. - /// \param[in] op_connector_size Size of each queue in the connector that the child operator pulls from. - /// \param[in] shuffle_files Whether or not to shuffle the files before reading data. 
- /// \param[in] num_devices Number of devices that the dataset should be divided into. - /// \param[in] device_id The device ID within num_devices. - /// \param[in] field_delim A char that indicates the delimiter to separate fields. - /// \param[in] column_default List of default values for the CSV field (default={}). Each item in the list is - /// either a valid type (float, int, or string). - /// \param[in] column_name List of column names of the dataset. - /// \param[in] sogounews_files_list List of file paths for the dataset files. - SogouNewsOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, int32_t op_connector_size, - bool shuffle_files, int32_t num_devices, int32_t device_id, char field_delim, - const std::vector> &column_default, - const std::vector &column_name, const std::vector &sogou_news_files_list); - - /// \brief Destructor. - ~SogouNewsOp() = default; - - /// \brief A print method typically used for debugging. - /// \param[out] out The output stream to write output to. - /// \param[in] show_all A bool to control if you want to show all info or just a summary. - void Print(std::ostream &out, bool show_all) const override; - - /// \brief DatasetName name getter. - /// \param[in] upper A bool to control if you want to return uppercase or lowercase Op name. - /// \return DatasetName of the current Op. - std::string DatasetName(bool upper = false) const { return upper ? "SogouNews" : "sogou news"; } - - /// \brief Op name getter. - /// \return Name of the current Op. - std::string Name() const override { return "SogouNewsOp"; } -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SOGOU_NEWS_OP_H_ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SOGOU_NEWS_OP_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SOGOU_NEWS_OP_H_ + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" + +namespace mindspore::lite { +namespace dataset { +class JaggedConnector; + +/// \class SogouNewsOp +/// \brief A Op derived class to represent SogouNews Op. +class SogouNewsOp : public CsvOp { + public: + /// \brief Constructor of SogouNewsOp. + /// \param[in] num_workers Number of worker threads reading data from sogou_news files. + /// \param[in] num_samples The number of samples to be included in the dataset. + /// \param[in] worker_connector_size Size of each internal queue. + /// \param[in] op_connector_size Size of each queue in the connector that the child operator pulls from. + /// \param[in] shuffle_files Whether or not to shuffle the files before reading data. + /// \param[in] num_devices Number of devices that the dataset should be divided into. + /// \param[in] device_id The device ID within num_devices. + /// \param[in] field_delim A char that indicates the delimiter to separate fields. 
+ /// \param[in] column_default List of default values for the CSV field (default={}). Each item in the list is + /// either a valid type (float, int, or string). + /// \param[in] column_name List of column names of the dataset. + /// \param[in] sogounews_files_list List of file paths for the dataset files. + SogouNewsOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, int32_t op_connector_size, + bool shuffle_files, int32_t num_devices, int32_t device_id, char field_delim, + const std::vector> &column_default, + const std::vector &column_name, const std::vector &sogou_news_files_list); + + /// \brief Destructor. + ~SogouNewsOp() = default; + + /// \brief A print method typically used for debugging. + /// \param[out] out The output stream to write output to. + /// \param[in] show_all A bool to control if you want to show all info or just a summary. + void Print(std::ostream &out, bool show_all) const override; + + /// \brief DatasetName name getter. + /// \param[in] upper A bool to control if you want to return uppercase or lowercase Op name. + /// \return DatasetName of the current Op. + std::string DatasetName(bool upper = false) const { return upper ? "SogouNews" : "sogou news"; } + + /// \brief Op name getter. + /// \return Name of the current Op. + std::string Name() const override { return "SogouNewsOp"; } +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SOGOU_NEWS_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/speech_commands_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/speech_commands_op.cc index 29e564ed..128ea5fe 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/speech_commands_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/speech_commands_op.cc @@ -27,7 +27,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr char kTestFiles[] = "testing_list.txt"; constexpr char kValFiles[] = "validation_list.txt"; @@ -206,4 +206,4 @@ Status SpeechCommandsOp::CountTotalRows(int64_t *num_rows) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/speech_commands_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/speech_commands_op.h index 8b8f5ca3..ae83d363 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/speech_commands_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/speech_commands_op.h @@ -32,7 +32,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SpeechCommandsOp : public MappableLeafOp { public: @@ -109,5 +109,5 @@ class SpeechCommandsOp : public MappableLeafOp { std::mutex mux_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SPEECH_COMMANDS_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/squad_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/squad_op.cc index cc3ebc4f..26f64ff8 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/squad_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/squad_op.cc @@ -24,7 
+24,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { SQuADOp::SQuADOp(const std::string &dataset_dir, const std::string &usage, int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, std::unique_ptr schema, int32_t op_connector_size, @@ -366,4 +366,4 @@ Status SQuADOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/squad_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/squad_op.h index e586b564..466a1189 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/squad_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/squad_op.h @@ -31,7 +31,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.h" #include "mindspore-lite/minddata/dataset/engine/jagged_connector.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class SQuADOp /// \brief Loading Operator of SQuAD Dataset. @@ -152,5 +152,5 @@ class SQuADOp : public NonMappableLeafOp { std::unique_ptr data_schema_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SQUAD_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sst2_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sst2_op.cc index e7e47dbb..5f25c581 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sst2_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sst2_op.cc @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { SST2Op::SST2Op(const std::vector &dataset_files_list, const std::string &usage, char field_delim, const std::vector> &column_default, @@ -134,4 +134,4 @@ Status SST2Op::LoadFile(const std::string &file, int64_t start_offset, int64_t e return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sst2_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sst2_op.h index d73c55c9..2f2cff2d 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sst2_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sst2_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SST2Op : public CsvOp { public: @@ -81,5 +81,5 @@ class SST2Op : public CsvOp { std::string usage_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SST2_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/stl10_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/stl10_op.cc index 03abc9ac..30b86613 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/stl10_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/stl10_op.cc @@ -29,7 +29,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr uint32_t kSTLImageRows = 
96; constexpr uint32_t kSTLImageCols = 96; @@ -414,4 +414,4 @@ Status STL10Op::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/stl10_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/stl10_op.h index 8a856136..35261522 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/stl10_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/stl10_op.h @@ -33,7 +33,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class STL10Op : public MappableLeafOp { public: @@ -114,5 +114,5 @@ class STL10Op : public MappableLeafOp { std::vector label_path_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_STL10_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sun397_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/sun397_op.cc index 6779a5f1..5107b8ed 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sun397_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sun397_op.cc @@ -28,7 +28,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr char kCategoriesMeta[] = "ClassName.txt"; @@ -233,4 +233,4 @@ Status SUN397Op::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/sun397_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/sun397_op.h index ea73ec57..3b7f2c3c 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/sun397_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/sun397_op.h @@ -33,7 +33,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Forward declares. template @@ -112,5 +112,5 @@ class SUN397Op : public MappableLeafOp { std::vector> image_path_label_pairs_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_SUN397_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/tedlium_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/tedlium_op.cc index ff614a26..cea967d4 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/tedlium_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/tedlium_op.cc @@ -1,313 +1,313 @@ -/** - * Copyright 2021-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/tedlium_op.h" - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/core/config_manager.h" -#include "mindspore-lite/minddata/dataset/core/tensor_shape.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" -#include "mindspore-lite/minddata/dataset/engine/execution_tree.h" -#include "utils/file_utils.h" - -namespace mindspore { -namespace dataset { -TedliumOp::TedliumOp(const std::string &dataset_dir, const std::string &release, const std::string &usage, - const std::string &extensions, int32_t num_parallel_workers, - std::unique_ptr data_schema, std::shared_ptr sampler, int32_t queue_size) - : MappableLeafOp(num_parallel_workers, queue_size, std::move(sampler)), - dataset_dir_(dataset_dir), - release_(release), - usage_(usage), - extensions_(extensions), - data_schema_(std::move(data_schema)), - audio_files_({}), - usage_list_({}) {} - -void TedliumOp::Print(std::ostream &out, bool show_all) const { - if (!show_all) { - // Call the super class for displaying any common 1-liner info. - ParallelOp::Print(out, show_all); - // Then show any custom derived-internal 1-liner info for this op. - out << "\n"; - } else { - // Call the super class for displaying any common detailed info. - ParallelOp::Print(out, show_all); - // Then show any custom derived-internal stuff. - out << "\nNumber of rows: " << num_rows_ << "\nTedliumOp directory: " << dataset_dir_; - } -} - -Status TedliumOp::PrepareData() { - auto real_path = FileUtils::GetRealPath(dataset_dir_.c_str()); - if (!real_path.has_value()) { - RETURN_STATUS_UNEXPECTED("Invalid file, get real path failed, path=" + dataset_dir_); - } - Path root_folder(real_path.value()); - - if (release_ == "release1" || release_ == "release2") { - if (usage_ == "train" || usage_ == "test" || usage_ == "dev") { - usage_list_.push_back(usage_); - } else if (usage_ == "all") { - usage_list_ = {"train", "test", "dev"}; - } else { - RETURN_STATUS_UNEXPECTED( - "Invalid parameter, usage should be \"train\", \"test\", \"dev\" or \"all\" when " - "specify \"release1\" or \"release2\" , got " + - usage_); - } - for (int32_t i = 0; i < usage_list_.size(); ++i) { - Path stm_folder = root_folder / usage_list_[i] / "stm"; - RETURN_IF_NOT_OK(ReadStmFolderRows(stm_folder, usage_list_[i])); - } - } else if (release_ == "release3") { - if (usage_ == "all") { - Path stm_folder = root_folder / "data" / "stm"; - RETURN_IF_NOT_OK(ReadStmFolderRows(stm_folder, "data")); - } else { - RETURN_STATUS_UNEXPECTED("Invalid parameter, usage should be \"all\" when specify \"release3\" , got " + usage_); - } - } - std::sort(audio_files_.begin(), audio_files_.end()); - num_rows_ = audio_files_.size(); - if (num_rows_ == 0) { - RETURN_STATUS_UNEXPECTED( - "Invalid data, no valid data matching the dataset API TedliumDataset. 
Please check file path or dataset API."); - } - return Status::OK(); -} - -Status TedliumOp::ReadStmFolderRows(const Path &stm_folder, const std::string &release_usage) { - Path dir(stm_folder); - std::shared_ptr dirItr = Path::DirIterator::OpenDirectory(&dir); - if (!dir.Exists() || dirItr == nullptr) { - RETURN_STATUS_UNEXPECTED("Invalid file, failed to open folder: " + dir.ToString()); - } - MS_LOG(DEBUG) << "Tedlium " + release_ + " stm folder Path found: " << dir << "."; - while (dirItr->HasNext()) { - Path file = dirItr->Next(); - if (file.Extension() == ".stm") { - std::ifstream handle(file.ToString(), std::ios::in); - if (!handle.is_open()) { - RETURN_STATUS_UNEXPECTED("Invalid file, failed to open file: " + file.ToString()); - } - std::string line; - int32_t numline = 0; - while (getline(handle, line)) { - std::string filename = line.substr(0, line.find(" ")); - std::stringstream ss; - ss << numline; - audio_files_.push_back({ss.str(), filename, release_usage}); - ++numline; - } - handle.close(); - } - } - return Status::OK(); -} - -Status TedliumOp::ReadStm(const Path &file_stm_path, int32_t row_line, std::string *talk_id, std::string *speaker_id, - std::string *start_time, std::string *end_time, std::string *identifier, - std::string *transcript) { - std::ifstream handle(file_stm_path.ToString().c_str(), std::ios::in); - if (!handle.is_open()) { - RETURN_STATUS_UNEXPECTED("Invalid file, get real path failed, path=" + file_stm_path.ToString()); - } - std::string line; - int32_t i = 0; - while (i <= row_line && getline(handle, line)) { - ++i; - } - handle.close(); - std::vector temp; - i = 0; - const int32_t data_stm_number = 7; - // There are seven pieces of data in each row, which need to be read out and stored - // with a space as a separator. - // Talk_id, _, speaker_id, start_time, end_time, identifier, transcript. - // "_" is the data we don't need. - while (i < data_stm_number - 1) { - std::string s = line.substr(0, line.find(" ")); - temp.push_back(s); - line.erase(0, line.find(" ") + 1); // to delete space, so use s.find(" ") + 1. - ++i; - } - temp.push_back(line); - if (temp.size() != data_stm_number) { - RETURN_STATUS_UNEXPECTED("Invalid data, stm data was broken."); - } - - const int32_t talk_id_num = 0, speaker_id_num = 2, start_time_num = 3, end_time_num = 4, identifier_num = 5, - transcript_num = 6; - *talk_id = temp[talk_id_num]; - // temp[1] is "_", which is the data we don't need. 
- *speaker_id = temp[speaker_id_num]; - *start_time = temp[start_time_num]; - *end_time = temp[end_time_num]; - *identifier = temp[identifier_num]; - *transcript = temp[transcript_num]; - - return Status::OK(); -} - -Status TedliumOp::ReadSph(const Path &file_sph_path, double start_time, double end_time, int32_t *sample_rate, - std::vector *result) { - std::ifstream handle(file_sph_path.ToString().c_str(), std::ios::in | std::ios::binary); - if (!handle.is_open()) { - RETURN_STATUS_UNEXPECTED("Invalid file, failed to open file: " + file_sph_path.ToString()); - } - - char head[1024]; - handle.read(head, sizeof(head)); - CHECK_FAIL_RETURN_UNEXPECTED(!handle.fail(), - "Invalid data, failed to read head part from sph file: " + file_sph_path.ToString() + - ", re-download dataset(make sure the data is true)."); - std::vector vec; - for (int32_t i = 0, j = 0; i < strlen(head); ++i) { - if (head[i] == '\n' || head[i] == ' ') { - while (head[i + 1] == ' ') { - i++; - } - std::string strTemp(head + j, i - j); - vec.push_back(strTemp); - j = i + 1; - } - } - const int32_t dataToBytes = 2; - for (int32_t i = 0; i < vec.size(); ++i) { - if (vec[i] == "sample_rate") { - *sample_rate = atoi(vec[i + dataToBytes].c_str()); - } - } - - int32_t start = static_cast(start_time * (*sample_rate)); - int32_t end = static_cast(end_time * (*sample_rate)); - const int32_t size = (end - start); - std::vector temp(size * dataToBytes); - handle.seekg(start, std::ios::beg); - int32_t j = 0; - char c; - while (j < size * dataToBytes) { - handle.read(&c, 1); - CHECK_FAIL_RETURN_UNEXPECTED(!handle.fail(), - "Invalid data, failed to read data part from sph file: " + file_sph_path.ToString() + - ", re-download dataset(make sure the data is true)."); - temp.push_back(c); - ++j; - } - - const float kMaxVal = 32767.0; - for (int32_t i = 0; i < size; ++i) { - char bh = temp[2 * i]; - char bl = temp[2 * i + 1]; - // SPH audio files is big-endian, so we should convert the two bytes of data into int16_t based - // on the high 8 bits and the low 8 bits. - int16_t s = static_cast(((bh & 0x00FF) << 8) | (bl & 0x00FF)); - // Data normalization: Convert the data from the interval [-32768,32767] to the interval [-1,1]. 
- double t = s / kMaxVal; - (*result).push_back(t); - } - handle.close(); - - return Status::OK(); -} - -Status TedliumOp::LoadTensorRow(row_id_type row_id, TensorRow *row) { - int32_t row_line = atoi(audio_files_[row_id][0].c_str()); - std::string file_name = audio_files_[row_id][1]; - std::string file_usage_or3_none_ = audio_files_[row_id][2]; - Path dir_path(dataset_dir_); - Path file_stm_path = dir_path / file_usage_or3_none_ / "stm" / (file_name + ".stm"); - Path file_sph_path = dir_path / file_usage_or3_none_ / "sph" / (file_name + extensions_); - std::string talk_id, speaker_id, start_time, end_time, identifier, transcript; - std::vector result; - int32_t sample_rate; - RETURN_IF_NOT_OK( - ReadStm(file_stm_path, row_line, &talk_id, &speaker_id, &start_time, &end_time, &identifier, &transcript)); - RETURN_IF_NOT_OK(ReadSph(file_sph_path, atof(start_time.c_str()), atof(end_time.c_str()), &sample_rate, &result)); - - std::shared_ptr sample_rate_tensor, talk_id_tensor, speaker_id_tensor, identifier_tensor, transcript_tensor; - RETURN_IF_NOT_OK(Tensor::CreateScalar(sample_rate, &sample_rate_tensor)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(talk_id, &talk_id_tensor)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(speaker_id, &speaker_id_tensor)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(identifier, &identifier_tensor)); - RETURN_IF_NOT_OK(Tensor::CreateScalar(transcript, &transcript_tensor)); - - std::shared_ptr audio_tensor; - RETURN_IF_NOT_OK(Tensor::CreateFromVector(result, &audio_tensor)); - RETURN_IF_NOT_OK(audio_tensor->ExpandDim(0)); - (*row) = TensorRow(row_id, {audio_tensor, sample_rate_tensor, transcript_tensor, talk_id_tensor, speaker_id_tensor, - identifier_tensor}); - row->setPath({file_sph_path.ToString(), file_sph_path.ToString(), file_stm_path.ToString(), file_stm_path.ToString(), - file_stm_path.ToString(), file_stm_path.ToString()}); - - return Status::OK(); -} - -Status TedliumOp::CountTotalRows(const std::string &dataset_dir, const std::string &release, const std::string &usage, - const std::string &extensions, int64_t *count) { - // the logic of counting the number of samples is copied from PrepareData() - RETURN_UNEXPECTED_IF_NULL(count); - *count = 0; - const int64_t num_samples = 0; - const int64_t start_index = 0; - auto new_sampler = std::make_shared(start_index, num_samples); - - // build a new unique schema object - auto new_schema = std::make_unique(); - RETURN_IF_NOT_OK( - new_schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT32), TensorImpl::kFlexible, 1))); - TensorShape sample_rate_scalar = TensorShape::CreateScalar(); - TensorShape trans_scalar = TensorShape::CreateScalar(); - TensorShape talk_id_scalar = TensorShape::CreateScalar(); - TensorShape speaker_id_scalar = TensorShape::CreateScalar(); - TensorShape identi_scalar = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(new_schema->AddColumn( - ColDescriptor("sample_rate", DataType(DataType::DE_INT32), TensorImpl::kFlexible, 0, &sample_rate_scalar))); - RETURN_IF_NOT_OK(new_schema->AddColumn( - ColDescriptor("transcript", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &trans_scalar))); - RETURN_IF_NOT_OK(new_schema->AddColumn( - ColDescriptor("talk_id", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &talk_id_scalar))); - RETURN_IF_NOT_OK(new_schema->AddColumn( - ColDescriptor("speaker_id", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &speaker_id_scalar))); - RETURN_IF_NOT_OK(new_schema->AddColumn( - ColDescriptor("identifier", DataType(DataType::DE_STRING), 
TensorImpl::kFlexible, 0, &identi_scalar))); - - std::shared_ptr cfg = GlobalContext::config_manager(); - int32_t num_workers = cfg->num_parallel_workers(); - int32_t op_connect_size = cfg->op_connector_size(); - std::shared_ptr op = - std::make_shared(dataset_dir, release, usage, extensions, num_workers, std::move(new_schema), - std::move(new_sampler), op_connect_size); - RETURN_IF_NOT_OK(op->PrepareData()); - *count = static_cast(op->audio_files_.size()); - return Status::OK(); -} - -Status TedliumOp::ComputeColMap() { - if (column_name_id_map_.empty()) { - for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) { - column_name_id_map_[data_schema_->Column(i).Name()] = i; - } - } else { - MS_LOG(WARNING) << "Column name map is already set!"; - } - return Status::OK(); -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/tedlium_op.h" + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/core/config_manager.h" +#include "mindspore-lite/minddata/dataset/core/tensor_shape.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sequential_sampler.h" +#include "mindspore-lite/minddata/dataset/engine/execution_tree.h" +#include "utils/file_utils.h" + +namespace mindspore::lite { +namespace dataset { +TedliumOp::TedliumOp(const std::string &dataset_dir, const std::string &release, const std::string &usage, + const std::string &extensions, int32_t num_parallel_workers, + std::unique_ptr data_schema, std::shared_ptr sampler, int32_t queue_size) + : MappableLeafOp(num_parallel_workers, queue_size, std::move(sampler)), + dataset_dir_(dataset_dir), + release_(release), + usage_(usage), + extensions_(extensions), + data_schema_(std::move(data_schema)), + audio_files_({}), + usage_list_({}) {} + +void TedliumOp::Print(std::ostream &out, bool show_all) const { + if (!show_all) { + // Call the super class for displaying any common 1-liner info. + ParallelOp::Print(out, show_all); + // Then show any custom derived-internal 1-liner info for this op. + out << "\n"; + } else { + // Call the super class for displaying any common detailed info. + ParallelOp::Print(out, show_all); + // Then show any custom derived-internal stuff. 
+ out << "\nNumber of rows: " << num_rows_ << "\nTedliumOp directory: " << dataset_dir_; + } +} + +Status TedliumOp::PrepareData() { + auto real_path = FileUtils::GetRealPath(dataset_dir_.c_str()); + if (!real_path.has_value()) { + RETURN_STATUS_UNEXPECTED("Invalid file, get real path failed, path=" + dataset_dir_); + } + Path root_folder(real_path.value()); + + if (release_ == "release1" || release_ == "release2") { + if (usage_ == "train" || usage_ == "test" || usage_ == "dev") { + usage_list_.push_back(usage_); + } else if (usage_ == "all") { + usage_list_ = {"train", "test", "dev"}; + } else { + RETURN_STATUS_UNEXPECTED( + "Invalid parameter, usage should be \"train\", \"test\", \"dev\" or \"all\" when " + "specify \"release1\" or \"release2\" , got " + + usage_); + } + for (int32_t i = 0; i < usage_list_.size(); ++i) { + Path stm_folder = root_folder / usage_list_[i] / "stm"; + RETURN_IF_NOT_OK(ReadStmFolderRows(stm_folder, usage_list_[i])); + } + } else if (release_ == "release3") { + if (usage_ == "all") { + Path stm_folder = root_folder / "data" / "stm"; + RETURN_IF_NOT_OK(ReadStmFolderRows(stm_folder, "data")); + } else { + RETURN_STATUS_UNEXPECTED("Invalid parameter, usage should be \"all\" when specify \"release3\" , got " + usage_); + } + } + std::sort(audio_files_.begin(), audio_files_.end()); + num_rows_ = audio_files_.size(); + if (num_rows_ == 0) { + RETURN_STATUS_UNEXPECTED( + "Invalid data, no valid data matching the dataset API TedliumDataset. Please check file path or dataset API."); + } + return Status::OK(); +} + +Status TedliumOp::ReadStmFolderRows(const Path &stm_folder, const std::string &release_usage) { + Path dir(stm_folder); + std::shared_ptr dirItr = Path::DirIterator::OpenDirectory(&dir); + if (!dir.Exists() || dirItr == nullptr) { + RETURN_STATUS_UNEXPECTED("Invalid file, failed to open folder: " + dir.ToString()); + } + MS_LOG(DEBUG) << "Tedlium " + release_ + " stm folder Path found: " << dir << "."; + while (dirItr->HasNext()) { + Path file = dirItr->Next(); + if (file.Extension() == ".stm") { + std::ifstream handle(file.ToString(), std::ios::in); + if (!handle.is_open()) { + RETURN_STATUS_UNEXPECTED("Invalid file, failed to open file: " + file.ToString()); + } + std::string line; + int32_t numline = 0; + while (getline(handle, line)) { + std::string filename = line.substr(0, line.find(" ")); + std::stringstream ss; + ss << numline; + audio_files_.push_back({ss.str(), filename, release_usage}); + ++numline; + } + handle.close(); + } + } + return Status::OK(); +} + +Status TedliumOp::ReadStm(const Path &file_stm_path, int32_t row_line, std::string *talk_id, std::string *speaker_id, + std::string *start_time, std::string *end_time, std::string *identifier, + std::string *transcript) { + std::ifstream handle(file_stm_path.ToString().c_str(), std::ios::in); + if (!handle.is_open()) { + RETURN_STATUS_UNEXPECTED("Invalid file, get real path failed, path=" + file_stm_path.ToString()); + } + std::string line; + int32_t i = 0; + while (i <= row_line && getline(handle, line)) { + ++i; + } + handle.close(); + std::vector temp; + i = 0; + const int32_t data_stm_number = 7; + // There are seven pieces of data in each row, which need to be read out and stored + // with a space as a separator. + // Talk_id, _, speaker_id, start_time, end_time, identifier, transcript. + // "_" is the data we don't need. 
+ while (i < data_stm_number - 1) { + std::string s = line.substr(0, line.find(" ")); + temp.push_back(s); + line.erase(0, line.find(" ") + 1); // to delete space, so use s.find(" ") + 1. + ++i; + } + temp.push_back(line); + if (temp.size() != data_stm_number) { + RETURN_STATUS_UNEXPECTED("Invalid data, stm data was broken."); + } + + const int32_t talk_id_num = 0, speaker_id_num = 2, start_time_num = 3, end_time_num = 4, identifier_num = 5, + transcript_num = 6; + *talk_id = temp[talk_id_num]; + // temp[1] is "_", which is the data we don't need. + *speaker_id = temp[speaker_id_num]; + *start_time = temp[start_time_num]; + *end_time = temp[end_time_num]; + *identifier = temp[identifier_num]; + *transcript = temp[transcript_num]; + + return Status::OK(); +} + +Status TedliumOp::ReadSph(const Path &file_sph_path, double start_time, double end_time, int32_t *sample_rate, + std::vector *result) { + std::ifstream handle(file_sph_path.ToString().c_str(), std::ios::in | std::ios::binary); + if (!handle.is_open()) { + RETURN_STATUS_UNEXPECTED("Invalid file, failed to open file: " + file_sph_path.ToString()); + } + + char head[1024]; + handle.read(head, sizeof(head)); + CHECK_FAIL_RETURN_UNEXPECTED(!handle.fail(), + "Invalid data, failed to read head part from sph file: " + file_sph_path.ToString() + + ", re-download dataset(make sure the data is true)."); + std::vector vec; + for (int32_t i = 0, j = 0; i < strlen(head); ++i) { + if (head[i] == '\n' || head[i] == ' ') { + while (head[i + 1] == ' ') { + i++; + } + std::string strTemp(head + j, i - j); + vec.push_back(strTemp); + j = i + 1; + } + } + const int32_t dataToBytes = 2; + for (int32_t i = 0; i < vec.size(); ++i) { + if (vec[i] == "sample_rate") { + *sample_rate = atoi(vec[i + dataToBytes].c_str()); + } + } + + int32_t start = static_cast(start_time * (*sample_rate)); + int32_t end = static_cast(end_time * (*sample_rate)); + const int32_t size = (end - start); + std::vector temp(size * dataToBytes); + handle.seekg(start, std::ios::beg); + int32_t j = 0; + char c; + while (j < size * dataToBytes) { + handle.read(&c, 1); + CHECK_FAIL_RETURN_UNEXPECTED(!handle.fail(), + "Invalid data, failed to read data part from sph file: " + file_sph_path.ToString() + + ", re-download dataset(make sure the data is true)."); + temp.push_back(c); + ++j; + } + + const float kMaxVal = 32767.0; + for (int32_t i = 0; i < size; ++i) { + char bh = temp[2 * i]; + char bl = temp[2 * i + 1]; + // SPH audio files is big-endian, so we should convert the two bytes of data into int16_t based + // on the high 8 bits and the low 8 bits. + int16_t s = static_cast(((bh & 0x00FF) << 8) | (bl & 0x00FF)); + // Data normalization: Convert the data from the interval [-32768,32767] to the interval [-1,1]. 
+    double t = s / kMaxVal;
+    (*result).push_back(t);
+  }
+  handle.close();
+
+  return Status::OK();
+}
+
+Status TedliumOp::LoadTensorRow(row_id_type row_id, TensorRow *row) {
+  int32_t row_line = atoi(audio_files_[row_id][0].c_str());
+  std::string file_name = audio_files_[row_id][1];
+  std::string file_usage_or3_none_ = audio_files_[row_id][2];
+  Path dir_path(dataset_dir_);
+  Path file_stm_path = dir_path / file_usage_or3_none_ / "stm" / (file_name + ".stm");
+  Path file_sph_path = dir_path / file_usage_or3_none_ / "sph" / (file_name + extensions_);
+  std::string talk_id, speaker_id, start_time, end_time, identifier, transcript;
+  std::vector<float> result;
+  int32_t sample_rate;
+  RETURN_IF_NOT_OK(
+    ReadStm(file_stm_path, row_line, &talk_id, &speaker_id, &start_time, &end_time, &identifier, &transcript));
+  RETURN_IF_NOT_OK(ReadSph(file_sph_path, atof(start_time.c_str()), atof(end_time.c_str()), &sample_rate, &result));
+
+  std::shared_ptr<Tensor> sample_rate_tensor, talk_id_tensor, speaker_id_tensor, identifier_tensor, transcript_tensor;
+  RETURN_IF_NOT_OK(Tensor::CreateScalar(sample_rate, &sample_rate_tensor));
+  RETURN_IF_NOT_OK(Tensor::CreateScalar(talk_id, &talk_id_tensor));
+  RETURN_IF_NOT_OK(Tensor::CreateScalar(speaker_id, &speaker_id_tensor));
+  RETURN_IF_NOT_OK(Tensor::CreateScalar(identifier, &identifier_tensor));
+  RETURN_IF_NOT_OK(Tensor::CreateScalar(transcript, &transcript_tensor));
+
+  std::shared_ptr<Tensor> audio_tensor;
+  RETURN_IF_NOT_OK(Tensor::CreateFromVector(result, &audio_tensor));
+  RETURN_IF_NOT_OK(audio_tensor->ExpandDim(0));
+  (*row) = TensorRow(row_id, {audio_tensor, sample_rate_tensor, transcript_tensor, talk_id_tensor, speaker_id_tensor,
+                              identifier_tensor});
+  row->setPath({file_sph_path.ToString(), file_sph_path.ToString(), file_stm_path.ToString(), file_stm_path.ToString(),
+                file_stm_path.ToString(), file_stm_path.ToString()});
+
+  return Status::OK();
+}
+
+Status TedliumOp::CountTotalRows(const std::string &dataset_dir, const std::string &release, const std::string &usage,
+                                 const std::string &extensions, int64_t *count) {
+  // the logic of counting the number of samples is copied from PrepareData()
+  RETURN_UNEXPECTED_IF_NULL(count);
+  *count = 0;
+  const int64_t num_samples = 0;
+  const int64_t start_index = 0;
+  auto new_sampler = std::make_shared<SequentialSamplerRT>(start_index, num_samples);
+
+  // build a new unique schema object
+  auto new_schema = std::make_unique<DataSchema>();
+  RETURN_IF_NOT_OK(
+    new_schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT32), TensorImpl::kFlexible, 1)));
+  TensorShape sample_rate_scalar = TensorShape::CreateScalar();
+  TensorShape trans_scalar = TensorShape::CreateScalar();
+  TensorShape talk_id_scalar = TensorShape::CreateScalar();
+  TensorShape speaker_id_scalar = TensorShape::CreateScalar();
+  TensorShape identi_scalar = TensorShape::CreateScalar();
+  RETURN_IF_NOT_OK(new_schema->AddColumn(
+    ColDescriptor("sample_rate", DataType(DataType::DE_INT32), TensorImpl::kFlexible, 0, &sample_rate_scalar)));
+  RETURN_IF_NOT_OK(new_schema->AddColumn(
+    ColDescriptor("transcript", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &trans_scalar)));
+  RETURN_IF_NOT_OK(new_schema->AddColumn(
+    ColDescriptor("talk_id", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &talk_id_scalar)));
+  RETURN_IF_NOT_OK(new_schema->AddColumn(
+    ColDescriptor("speaker_id", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &speaker_id_scalar)));
+  RETURN_IF_NOT_OK(new_schema->AddColumn(
+    ColDescriptor("identifier", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &identi_scalar)));
+
+  std::shared_ptr<ConfigManager> cfg = GlobalContext::config_manager();
+  int32_t num_workers = cfg->num_parallel_workers();
+  int32_t op_connect_size = cfg->op_connector_size();
+  std::shared_ptr<TedliumOp> op =
+    std::make_shared<TedliumOp>(dataset_dir, release, usage, extensions, num_workers, std::move(new_schema),
+                                std::move(new_sampler), op_connect_size);
+  RETURN_IF_NOT_OK(op->PrepareData());
+  *count = static_cast<int64_t>(op->audio_files_.size());
+  return Status::OK();
+}
+
+Status TedliumOp::ComputeColMap() {
+  if (column_name_id_map_.empty()) {
+    for (int32_t i = 0; i < data_schema_->NumColumns(); ++i) {
+      column_name_id_map_[data_schema_->Column(i).Name()] = i;
+    }
+  } else {
+    MS_LOG(WARNING) << "Column name map is already set!";
+  }
+  return Status::OK();
+}
+}  // namespace dataset
+}  // namespace mindspore::lite
diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/tedlium_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/tedlium_op.h
index 86c01374..bc33d940 100644
--- a/mindspore-lite/minddata/dataset/engine/datasetops/source/tedlium_op.h
+++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/tedlium_op.h
@@ -1,126 +1,126 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_TEDLIUM_OP_H_
-#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_TEDLIUM_OP_H_
-
-#include
-#include
-#include
-#include
-
-#include "mindspore-lite/minddata/dataset/core/tensor.h"
-#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h"
-#include "mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h"
-#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h"
-#include "mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache.h"
-
-#include "mindspore-lite/minddata/dataset/util/status.h"
-#include "mindspore-lite/minddata/dataset/util/path.h"
-
-namespace mindspore {
-namespace dataset {
-class TedliumOp : public MappableLeafOp {
- public:
-  /// \brief Constructor.
-  /// \param[in] dataset_dir Directory of tedlium dataset.
-  /// \param[in] release Release of tedlium dataset, can be 'release1', 'release2' or 'release3'.
-  /// \param[in] usage Usage of this dataset, if release is release3, can be '', else 'train', 'dev', 'test' or 'all'.
-  /// \param[in] extensions Extensions of the sph file, only '.sph' is valid.
-  /// \param[in] num_parallel_workers Number of workers in parallel.
-  /// \param[in] data_schema Schema of dataset.
-  /// \param[in] sampler Sampler tells TedliumOp what to read.
-  /// \param[in] queue_size Connector queue size.
-  TedliumOp(const std::string &dataset_dir, const std::string &release, const std::string &usage,
-            const std::string &extensions, int32_t num_parallel_workers, std::unique_ptr data_schema,
-            std::shared_ptr sampler, int32_t queue_size);
-
-  /// \brief Destructor.
- ~TedliumOp() = default; - - /// \brief A print method typically used for debugging. - /// \param[in] out Out stream. - /// \param[in] show_all Whether to show all information. - void Print(std::ostream &out, bool show_all) const override; - - /// \brief Op name getter. - std::string Name() const override { return "TedliumOp"; } - - /// \brief Initialize TedliumOp related var, calls the function to walk all files. - /// \return Status The status code returned. - Status PrepareData() override; - - /// \brief Function to count the number of samples in the TEDLIUM dataset. - /// \param[in] dataset_dir Directory of tedlium dataset. - /// \param[in] release Release of tedlium dataset. - /// \param[in] usage Usage of this dataset, if release is release3, can be '', else 'train', 'dev', 'test' or 'all'. - /// \param[in] extensions Extensions of the sph file, only '.sph' is valid. - /// \param[in] count Output arg that will hold the actual dataset size. - /// \return Status The status code returned. - static Status CountTotalRows(const std::string &dataset_dir, const std::string &release, const std::string &usage, - const std::string &extensions, int64_t *count); - - private: - /// \brief Read stm file. - /// \param[in] file_stm_path The path of stm file. - /// \param[in] row_line Which line of the file we need to read. - /// \param[out] talk_id Talk identifier of the row_line in the file. - /// \param[out] speaker_id Speaker identifier of the row_line in the file. - /// \param[out] start_time Start time of the row_line in the file. - /// \param[out] end_time End time of the row_line in the file. - /// \param[out] identifier Identifier of the row_line in the file. - /// \param[out] transcript Transcript of the row_line in the file. - /// \return Status The status code returned. - Status ReadStm(const Path &file_stm_path, int32_t row_line, std::string *talk_id, std::string *speaker_id, - std::string *start_time, std::string *end_time, std::string *identifier, std::string *transcript); - - /// \brief Read sph file. - /// \param[in] file_sph_path The path of sph file. - /// \param[in] start_time The start_time of row we need to use. - /// \param[in] end_time The end_time of row we need to use. - /// \param[out] sample_rate Sample rate of the row. - /// \param[out] result Waveform result vector of the row. - /// \return Status The status code returned. - Status ReadSph(const Path &file_sph_path, double start_time, double end_time, int32_t *sample_rate, - std::vector *result); - - /// \brief Read stm files according current release`s usage. - /// \param[in] stm_folder The folder of stm files. - /// \param[in] release_usage For release1 or release2, use usage_, for release3, "data". - /// \return Status The status code returned. - Status ReadStmFolderRows(const Path &stm_folder, const std::string &release_usage); - - /// \brief Load a tensor row according to a pair. - /// \param[in] row_id Id of row need to load. - /// \param[in] row Audio & label read into this tensor row. - /// \return Status The status code returned. - Status LoadTensorRow(row_id_type row_id, TensorRow *row) override; - - /// \brief Private function for computing the assignment of the column name map. - /// \return Status The status code returned. 
- Status ComputeColMap() override; - - const std::string release_; - const std::string dataset_dir_; - const std::string usage_; - const std::string extensions_; - std::unique_ptr data_schema_; - - std::vector > audio_files_; - std::vector usage_list_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_TEDLIUM_OP_H_ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_TEDLIUM_OP_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_TEDLIUM_OP_H_ + +#include +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/core/tensor.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/mappable_leaf_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" +#include "mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache.h" + +#include "mindspore-lite/minddata/dataset/util/status.h" +#include "mindspore-lite/minddata/dataset/util/path.h" + +namespace mindspore::lite { +namespace dataset { +class TedliumOp : public MappableLeafOp { + public: + /// \brief Constructor. + /// \param[in] dataset_dir Directory of tedlium dataset. + /// \param[in] release Release of tedlium dataset, can be 'release1', 'release2' or 'release3'. + /// \param[in] usage Usage of this dataset, if release is release3, can be '', else 'train', 'dev', 'test' or 'all'. + /// \param[in] extensions Extensions of the sph file, only '.sph' is valid. + /// \param[in] num_parallel_workers Number of workers in parallel. + /// \param[in] data_schema Schema of dataset. + /// \param[in] sampler Sampler tells TedliumOp what to read. + /// \param[in] queue_size Connector queue size. + TedliumOp(const std::string &dataset_dir, const std::string &release, const std::string &usage, + const std::string &extensions, int32_t num_parallel_workers, std::unique_ptr data_schema, + std::shared_ptr sampler, int32_t queue_size); + + /// \brief Destructor. + ~TedliumOp() = default; + + /// \brief A print method typically used for debugging. + /// \param[in] out Out stream. + /// \param[in] show_all Whether to show all information. + void Print(std::ostream &out, bool show_all) const override; + + /// \brief Op name getter. + std::string Name() const override { return "TedliumOp"; } + + /// \brief Initialize TedliumOp related var, calls the function to walk all files. + /// \return Status The status code returned. + Status PrepareData() override; + + /// \brief Function to count the number of samples in the TEDLIUM dataset. + /// \param[in] dataset_dir Directory of tedlium dataset. + /// \param[in] release Release of tedlium dataset. + /// \param[in] usage Usage of this dataset, if release is release3, can be '', else 'train', 'dev', 'test' or 'all'. 
+ /// \param[in] extensions Extensions of the sph file, only '.sph' is valid. + /// \param[in] count Output arg that will hold the actual dataset size. + /// \return Status The status code returned. + static Status CountTotalRows(const std::string &dataset_dir, const std::string &release, const std::string &usage, + const std::string &extensions, int64_t *count); + + private: + /// \brief Read stm file. + /// \param[in] file_stm_path The path of stm file. + /// \param[in] row_line Which line of the file we need to read. + /// \param[out] talk_id Talk identifier of the row_line in the file. + /// \param[out] speaker_id Speaker identifier of the row_line in the file. + /// \param[out] start_time Start time of the row_line in the file. + /// \param[out] end_time End time of the row_line in the file. + /// \param[out] identifier Identifier of the row_line in the file. + /// \param[out] transcript Transcript of the row_line in the file. + /// \return Status The status code returned. + Status ReadStm(const Path &file_stm_path, int32_t row_line, std::string *talk_id, std::string *speaker_id, + std::string *start_time, std::string *end_time, std::string *identifier, std::string *transcript); + + /// \brief Read sph file. + /// \param[in] file_sph_path The path of sph file. + /// \param[in] start_time The start_time of row we need to use. + /// \param[in] end_time The end_time of row we need to use. + /// \param[out] sample_rate Sample rate of the row. + /// \param[out] result Waveform result vector of the row. + /// \return Status The status code returned. + Status ReadSph(const Path &file_sph_path, double start_time, double end_time, int32_t *sample_rate, + std::vector *result); + + /// \brief Read stm files according current release`s usage. + /// \param[in] stm_folder The folder of stm files. + /// \param[in] release_usage For release1 or release2, use usage_, for release3, "data". + /// \return Status The status code returned. + Status ReadStmFolderRows(const Path &stm_folder, const std::string &release_usage); + + /// \brief Load a tensor row according to a pair. + /// \param[in] row_id Id of row need to load. + /// \param[in] row Audio & label read into this tensor row. + /// \return Status The status code returned. + Status LoadTensorRow(row_id_type row_id, TensorRow *row) override; + + /// \brief Private function for computing the assignment of the column name map. + /// \return Status The status code returned. 
+ Status ComputeColMap() override; + + const std::string release_; + const std::string dataset_dir_; + const std::string usage_; + const std::string extensions_; + std::unique_ptr data_schema_; + + std::vector > audio_files_; + std::vector usage_list_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_TEDLIUM_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.cc index 0a7c92d8..7f3928ac 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/wait_post.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { TextFileOp::TextFileOp(int32_t num_workers, int64_t total_rows, int32_t worker_connector_size, std::unique_ptr schema, std::vector text_files_list, @@ -255,4 +255,4 @@ Status TextFileOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.h index b6b20cef..880b2d42 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.h @@ -32,7 +32,7 @@ #include "mindspore-lite/minddata/dataset/util/wait_post.h" #include "mindspore-lite/minddata/dataset/engine/jagged_connector.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { using StringIndex = AutoIndexObj; @@ -120,5 +120,5 @@ class TextFileOp : public NonMappableLeafOp { std::unique_ptr data_schema_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_TEXT_FILE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/tf_reader_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/tf_reader_op.cc index 40833eb2..e116e17a 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/tf_reader_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/tf_reader_op.cc @@ -36,7 +36,7 @@ #include "utils/file_utils.h" #include "utils/system/crc32c.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { TFReaderOp::TFReaderOp(int32_t num_workers, int32_t worker_connector_size, int64_t total_num_rows, std::vector dataset_files_list, std::unique_ptr data_schema, @@ -1345,4 +1345,4 @@ Status TFReaderOp::GetNextRowPullMode(TensorRow *const row) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/tf_reader_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/tf_reader_op.h index 25756133..8dca595f 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/tf_reader_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/tf_reader_op.h @@ -45,7 +45,7 @@ class Feature; class BytesList; } // namespace dataengine -namespace mindspore { +namespace mindspore::lite { namespace dataset { const std::streamsize kTFRecordRecLenSize = sizeof(int64_t); const std::streamsize kTFRecordHeadFootSize = 
sizeof(int32_t); // header has same size with footer @@ -368,5 +368,5 @@ class TFReaderOp : public NonMappableLeafOp { bool decode_; // whether to parse the proto }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_TF_READER_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/udpos_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/udpos_op.cc index 1cc9dfb1..84dd2867 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/udpos_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/udpos_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/wait_post.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { UDPOSOp::UDPOSOp(int32_t num_workers, int64_t total_rows, int32_t worker_connector_size, std::unique_ptr schema, const std::vector &udpos_files_list, @@ -183,4 +183,4 @@ Status UDPOSOp::LoadFile(const std::string &file, int64_t start_offset, int64_t return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/udpos_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/udpos_op.h index 4dbdc350..9773e52d 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/udpos_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/udpos_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.h" #include "mindspore-lite/minddata/dataset/util/queue.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class JaggedConnector; @@ -92,5 +92,5 @@ class UDPOSOp : public TextFileOp { Status LoadFile(const std::string &file, int64_t start_offset, int64_t end_offset, int32_t worker_id) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_UDPOS_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/usps_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/usps_op.cc index a5362c4a..05b322c8 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/usps_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/usps_op.cc @@ -28,7 +28,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr int64_t kUSPSImageHeight = 16; constexpr int64_t kUSPSImageWidth = 16; @@ -359,4 +359,4 @@ Status USPSOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/usps_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/usps_op.h index b3d5b34a..59b30c12 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/usps_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/usps_op.h @@ -1,137 +1,137 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_USPS_OP_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_USPS_OP_H_ - -#include -#include -#include -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/core/tensor.h" -#include "mindspore-lite/minddata/dataset/engine/data_schema.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" -#include "mindspore-lite/minddata/dataset/util/path.h" -#include "mindspore-lite/minddata/dataset/util/queue.h" -#include "mindspore-lite/minddata/dataset/util/status.h" -#include "mindspore-lite/minddata/dataset/util/wait_post.h" -#include "mindspore-lite/minddata/dataset/engine/jagged_connector.h" - -namespace mindspore { -namespace dataset { -class USPSOp : public NonMappableLeafOp { - public: - // Constructor. - // @param const std::string &dataset_dir - dir directory of USPS data file. - // @param const std::string &usage - Usage of this dataset, can be 'train', 'test' or 'all'. - // @param std::unique_ptr data_schema - the schema of the USPS dataset. - // @param num_workers - number of worker threads reading data from tf_file files. - // @param worker_connector_size - size of each internal queue. - // @param num_samples - number of samples to read. - // @param op_connector_size - size of each queue in the connector that the child operator pulls from. - // @param shuffle_files - whether to shuffle the files before reading data. - // @param num_devices - number of devices. - // @param device_id - device id. - USPSOp(const std::string &dataset_dir, const std::string &usage, std::unique_ptr data_schema, - int32_t num_workers, int32_t worker_connector_size, int64_t num_samples, int32_t op_connector_size, - bool shuffle_files, int32_t num_devices, int32_t device_id); - - // Destructor. - ~USPSOp() = default; - - // Op name getter. - // @return std::string - Name of the current Op. - std::string Name() const override { return "USPSOp"; } - - // A print method typically used for debugging. - // @param std::ostream &out - out stream. - // @param bool show_all - whether to show all information. - void Print(std::ostream &out, bool show_all) const override; - - // Instantiates the internal queues and connectors - // @return Status - the error code returned. - Status Init() override; - - // Function to count the number of samples in the USPS dataset. - // @param const std::string &dir - path to the USPS directory. - // @param const std::string &usage - Usage of this dataset, can be 'train', 'test' or 'all'. - // @param int64_t *count - output arg that will hold the minimum of the actual dataset size and numSamples. - // @return Status - the error coed returned. - static Status CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count); - - // File names getter. - // @return Vector of the input file names. 
- std::vector FileNames() { return data_files_list_; } - - private: - // Function to count the number of samples in one data file. - // @param const std::string &data_file - path to the data file. - // @return int64_t - the count result. - int64_t CountRows(const std::string &data_file) const; - - // Reads a data file and loads the data into multiple TensorRows. - // @param data_file - the data file to read. - // @param start_offset - the start offset of file. - // @param end_offset - the end offset of file. - // @param worker_id - the id of the worker that is executing this function. - // @return Status - the error code returned. - Status LoadFile(const std::string &data_file, int64_t start_offset, int64_t end_offset, int32_t worker_id) override; - - // Parses a single row and puts the data into a tensor table. - // @param line - the content of the row. - // @param trow - image & label read into this tensor row. - // @return Status - the error code returned. - Status LoadTensor(std::string *line, TensorRow *trow); - - // Calculate number of rows in each shard. - // @return Status - the error code returned. - Status CalculateNumRowsPerShard() override; - - // Fill the IOBlockQueue. - // @param i_keys - keys of file to fill to the IOBlockQueue. - // @return Status - the error code returned. - Status FillIOBlockQueue(const std::vector &i_keys) override; - - // Get all files in the dataset_dir_. - // @return Status - The status code returned. - Status GetFiles(); - - // Parse a line to image and label. - // @param line - the content of the row. - // @param images_buffer - image destination. - // @param labels_buffer - label destination. - // @return Status - the status code returned. - Status ParseLine(std::string *line, const std::unique_ptr &images_buffer, - const std::unique_ptr &labels_buffer) const; - - // Private function for computing the assignment of the column name map. - // @return Status - the error code returned. - Status ComputeColMap() override; - - const std::string usage_; // can be "all", "train" or "test". - std::string dataset_dir_; // directory of data files. - std::unique_ptr data_schema_; - - std::vector data_files_list_; -}; -} // namespace dataset -} // namespace mindspore - -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_USPS_OP_H_ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_USPS_OP_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_USPS_OP_H_ + +#include +#include +#include +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/core/tensor.h" +#include "mindspore-lite/minddata/dataset/engine/data_schema.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/parallel_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/nonmappable_leaf_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/sampler.h" +#include "mindspore-lite/minddata/dataset/util/path.h" +#include "mindspore-lite/minddata/dataset/util/queue.h" +#include "mindspore-lite/minddata/dataset/util/status.h" +#include "mindspore-lite/minddata/dataset/util/wait_post.h" +#include "mindspore-lite/minddata/dataset/engine/jagged_connector.h" + +namespace mindspore::lite { +namespace dataset { +class USPSOp : public NonMappableLeafOp { + public: + // Constructor. + // @param const std::string &dataset_dir - dir directory of USPS data file. + // @param const std::string &usage - Usage of this dataset, can be 'train', 'test' or 'all'. + // @param std::unique_ptr data_schema - the schema of the USPS dataset. + // @param num_workers - number of worker threads reading data from tf_file files. + // @param worker_connector_size - size of each internal queue. + // @param num_samples - number of samples to read. + // @param op_connector_size - size of each queue in the connector that the child operator pulls from. + // @param shuffle_files - whether to shuffle the files before reading data. + // @param num_devices - number of devices. + // @param device_id - device id. + USPSOp(const std::string &dataset_dir, const std::string &usage, std::unique_ptr data_schema, + int32_t num_workers, int32_t worker_connector_size, int64_t num_samples, int32_t op_connector_size, + bool shuffle_files, int32_t num_devices, int32_t device_id); + + // Destructor. + ~USPSOp() = default; + + // Op name getter. + // @return std::string - Name of the current Op. + std::string Name() const override { return "USPSOp"; } + + // A print method typically used for debugging. + // @param std::ostream &out - out stream. + // @param bool show_all - whether to show all information. + void Print(std::ostream &out, bool show_all) const override; + + // Instantiates the internal queues and connectors + // @return Status - the error code returned. + Status Init() override; + + // Function to count the number of samples in the USPS dataset. + // @param const std::string &dir - path to the USPS directory. + // @param const std::string &usage - Usage of this dataset, can be 'train', 'test' or 'all'. + // @param int64_t *count - output arg that will hold the minimum of the actual dataset size and numSamples. + // @return Status - the error coed returned. + static Status CountTotalRows(const std::string &dir, const std::string &usage, int64_t *count); + + // File names getter. + // @return Vector of the input file names. + std::vector FileNames() { return data_files_list_; } + + private: + // Function to count the number of samples in one data file. + // @param const std::string &data_file - path to the data file. + // @return int64_t - the count result. + int64_t CountRows(const std::string &data_file) const; + + // Reads a data file and loads the data into multiple TensorRows. + // @param data_file - the data file to read. + // @param start_offset - the start offset of file. 
+ // @param end_offset - the end offset of file. + // @param worker_id - the id of the worker that is executing this function. + // @return Status - the error code returned. + Status LoadFile(const std::string &data_file, int64_t start_offset, int64_t end_offset, int32_t worker_id) override; + + // Parses a single row and puts the data into a tensor table. + // @param line - the content of the row. + // @param trow - image & label read into this tensor row. + // @return Status - the error code returned. + Status LoadTensor(std::string *line, TensorRow *trow); + + // Calculate number of rows in each shard. + // @return Status - the error code returned. + Status CalculateNumRowsPerShard() override; + + // Fill the IOBlockQueue. + // @param i_keys - keys of file to fill to the IOBlockQueue. + // @return Status - the error code returned. + Status FillIOBlockQueue(const std::vector &i_keys) override; + + // Get all files in the dataset_dir_. + // @return Status - The status code returned. + Status GetFiles(); + + // Parse a line to image and label. + // @param line - the content of the row. + // @param images_buffer - image destination. + // @param labels_buffer - label destination. + // @return Status - the status code returned. + Status ParseLine(std::string *line, const std::unique_ptr &images_buffer, + const std::unique_ptr &labels_buffer) const; + + // Private function for computing the assignment of the column name map. + // @return Status - the error code returned. + Status ComputeColMap() override; + + const std::string usage_; // can be "all", "train" or "test". + std::string dataset_dir_; // directory of data files. + std::unique_ptr data_schema_; + + std::vector data_files_list_; +}; +} // namespace dataset +} // namespace mindspore::lite + +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_USPS_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/voc_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/voc_op.cc index b2586acb..c2f1786f 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/voc_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/voc_op.cc @@ -25,7 +25,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const char kColumnImage[] = "image"; const char kColumnTarget[] = "target"; @@ -377,4 +377,4 @@ Status VOCOp::GetClassIndexing(std::vector @@ -175,5 +175,5 @@ class VOCOp : public MappableLeafOp { #endif }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_VOC_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/wider_face_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/wider_face_op.cc index 872ac044..de9fd6c1 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/wider_face_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/wider_face_op.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/util/path.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr char kSplitPath[] = "wider_face_split"; constexpr char kTrainAnno[] = "wider_face_train_bbx_gt.txt"; @@ -301,4 +301,4 @@ Status WIDERFaceOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/minddata/dataset/engine/datasetops/source/wider_face_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/wider_face_op.h index 38b251ba..ee4720e7 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/wider_face_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/wider_face_op.h @@ -32,7 +32,7 @@ #include "mindspore-lite/minddata/dataset/util/queue.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class WIDERFaceOp : public MappableLeafOp { public: @@ -116,5 +116,5 @@ class WIDERFaceOp : public MappableLeafOp { std::map> annotation_map_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_WIDER_FACE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/wiki_text_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/wiki_text_op.cc index b940ed8f..f62303c6 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/wiki_text_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/wiki_text_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/io_block.h" #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { WikiTextOp::WikiTextOp(int32_t num_workers, int64_t total_rows, int32_t worker_connector_size, std::unique_ptr schema, const std::vector &file_list, @@ -50,4 +50,4 @@ void WikiTextOp::Print(std::ostream &out, bool show_all) const { } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/wiki_text_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/wiki_text_op.h index 2bac031a..20cc2265 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/wiki_text_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/wiki_text_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.h" #include "mindspore-lite/minddata/dataset/util/queue.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class JaggedConnector; @@ -66,5 +66,5 @@ class WikiTextOp : public TextFileOp { std::string DatasetName(bool upper = false) const { return upper ? 
"WikiText" : "wiki text"; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_WIKI_TEXT_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/yahoo_answers_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/yahoo_answers_op.cc index f7204d49..94bdac34 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/yahoo_answers_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/yahoo_answers_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/jagged_connector.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { YahooAnswersOp::YahooAnswersOp(const std::vector &dataset_files_list, char field_delim, const std::vector> &column_default, @@ -50,4 +50,4 @@ void YahooAnswersOp::Print(std::ostream &out, bool show_all) const { } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/yahoo_answers_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/yahoo_answers_op.h index 26270301..29e5a9b3 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/yahoo_answers_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/yahoo_answers_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class YahooAnswersOp : public CsvOp { public: @@ -67,5 +67,5 @@ class YahooAnswersOp : public CsvOp { std::string Name() const override { return "YahooAnswersOp"; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_YAHOO_ANSWERS_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/yelp_review_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/yelp_review_op.cc index 6c870e14..fb125ed9 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/yelp_review_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/yelp_review_op.cc @@ -20,7 +20,7 @@ #include "include/common/debug/common.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { YelpReviewOp::YelpReviewOp(int32_t num_workers, int64_t num_samples, int32_t worker_connector_size, int32_t op_connector_size, bool shuffle_files, int32_t num_devices, int32_t device_id, @@ -49,4 +49,4 @@ void YelpReviewOp::Print(std::ostream &out, bool show_all) const { } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/yelp_review_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/yelp_review_op.h index 42f6e976..f3f8f0c9 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/yelp_review_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/yelp_review_op.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class JaggedConnector; @@ -67,5 +67,5 @@ class YelpReviewOp : public CsvOp { std::string Name() const override { return "YelpReviewOp"; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // 
MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_YELP_REVIEW_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/yes_no_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/source/yes_no_op.cc index ef76f26a..6ae54406 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/yes_no_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/yes_no_op.cc @@ -28,7 +28,7 @@ #include "utils/file_utils.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr float kMaxShortVal = 32767.0; constexpr char kExtension[] = ".wav"; @@ -145,4 +145,4 @@ Status YesNoOp::ComputeColMap() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/source/yes_no_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/source/yes_no_op.h index 5ded1ee1..47730984 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/source/yes_no_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/source/yes_no_op.h @@ -33,7 +33,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class YesNoOp : public MappableLeafOp { public: @@ -88,5 +88,5 @@ class YesNoOp : public MappableLeafOp { std::unique_ptr data_schema_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_SOURCE_YES_NO_OP_H diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/take_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/take_op.cc index 633f3bb8..6a423075 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/take_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/take_op.cc @@ -18,7 +18,7 @@ #include "utils/ms_utils.h" #include "mindspore-lite/minddata/dataset/core/config_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor of the TakeOp. 
TakeOp::TakeOp(int32_t count) : PipelineOp(0), max_takes_(count), take_count_(0) {} @@ -89,4 +89,4 @@ Status TakeOp::GetNextRowPullMode(TensorRow *const row) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/take_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/take_op.h index a5ee522b..6e06d42e 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/take_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/take_op.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h" #include "mindspore-lite/minddata/dataset/engine/dataset_iterator.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class TakeOp : public PipelineOp { public: @@ -86,6 +86,6 @@ class TakeOp : public PipelineOp { Status CommonGetNextRow(TensorRow *row, bool is_pull_mode); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_TAKE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/zip_op.cc b/mindspore-lite/minddata/dataset/engine/datasetops/zip_op.cc index 45b308fa..62d0587a 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/zip_op.cc +++ b/mindspore-lite/minddata/dataset/engine/datasetops/zip_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/constants.h" #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Construct ZipOp here, local variables initialized in operator due to tree construction restrictions ZipOp::ZipOp() : PipelineOp(0) {} @@ -154,4 +154,4 @@ Status ZipOp::GetNextRowPullMode(TensorRow *row) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/datasetops/zip_op.h b/mindspore-lite/minddata/dataset/engine/datasetops/zip_op.h index 01fe47f7..8f7cec64 100644 --- a/mindspore-lite/minddata/dataset/engine/datasetops/zip_op.h +++ b/mindspore-lite/minddata/dataset/engine/datasetops/zip_op.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/pipeline_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ZipOp : public PipelineOp { @@ -107,6 +107,6 @@ class ZipOp : public PipelineOp { Status ComputeColMap() override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_DATASETOPS_ZIP_OP_H_ diff --git a/mindspore-lite/minddata/dataset/engine/execution_tree.cc b/mindspore-lite/minddata/dataset/engine/execution_tree.cc index 33b2df4a..34bbb377 100644 --- a/mindspore-lite/minddata/dataset/engine/execution_tree.cc +++ b/mindspore-lite/minddata/dataset/engine/execution_tree.cc @@ -28,7 +28,7 @@ #include "utils/ms_context.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor #ifdef WITH_BACKEND @@ -306,4 +306,4 @@ Status ExecutionTree::Prepare(bool is_pull_mode) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/execution_tree.h b/mindspore-lite/minddata/dataset/engine/execution_tree.h index 2d745d51..f25f9af1 100644 --- 
a/mindspore-lite/minddata/dataset/engine/execution_tree.h +++ b/mindspore-lite/minddata/dataset/engine/execution_tree.h @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/engine/perf/profiling.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declares class TaskGroup; @@ -246,5 +246,5 @@ class ExecutionTree { #endif }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_EXECUTION_TREE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/gpu_item_connector.h b/mindspore-lite/minddata/dataset/engine/gpu_item_connector.h index 6b109952..8fbd71c9 100644 --- a/mindspore-lite/minddata/dataset/engine/gpu_item_connector.h +++ b/mindspore-lite/minddata/dataset/engine/gpu_item_connector.h @@ -27,7 +27,7 @@ using mindspore::device::DataQueueItem; -namespace mindspore { +namespace mindspore::lite { namespace dataset { struct GpuConnectorItem { @@ -86,5 +86,5 @@ class GpuConnector : public Connector { std::vector is_queue_finished_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_GPU_ITEM_CONNECTOR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache.cc b/mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache.cc index 9f62e239..009a5573 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache_impl.h" #endif -namespace mindspore::dataset { +namespace mindspore::lite::dataset { #ifndef ENABLE_ANDROID Status DatasetCache::from_json(nlohmann::json json_obj, std::shared_ptr *cache) { if (json_obj.find("cache") != json_obj.end()) { @@ -59,4 +59,4 @@ Status DatasetCache::from_json(nlohmann::json json_obj, std::shared_ptr *cache); #endif }; -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_CACHE_DATASET_CACHE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache_impl.cc b/mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache_impl.cc index ab6c37a5..49518a50 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache_impl.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache_impl.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/cache_merge_op.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/cache_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// Method to initialize the DatasetCache by creating an instance of a CacheClient /// \return Status Error code @@ -102,4 +102,4 @@ Status DatasetCacheImpl::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache_impl.h b/mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache_impl.h index e337fe6a..d83ef9c0 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache_impl.h +++ b/mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache_impl.h @@ -27,7 +27,7 @@ #include 
"mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/samplers_ir.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// DatasetCache is the IR of CacheClient class DatasetCacheImpl : public DatasetCache { @@ -88,5 +88,5 @@ class DatasetCacheImpl : public DatasetCache { std::optional prefetch_sz_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_CACHE_DATASET_CACHE_IMPL_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/cache/pre_built_dataset_cache.cc b/mindspore-lite/minddata/dataset/engine/ir/cache/pre_built_dataset_cache.cc index b13ce057..3c8d153a 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/cache/pre_built_dataset_cache.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/cache/pre_built_dataset_cache.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/cache_merge_op.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/cache_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// Method to initialize the DatasetCache by creating an instance of a CacheClient /// \return Status Error code @@ -28,4 +28,4 @@ Status PreBuiltDatasetCache::Build() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/cache/pre_built_dataset_cache.h b/mindspore-lite/minddata/dataset/engine/ir/cache/pre_built_dataset_cache.h index ccfe00a5..ba168fbd 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/cache/pre_built_dataset_cache.h +++ b/mindspore-lite/minddata/dataset/engine/ir/cache/pre_built_dataset_cache.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache_impl.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/samplers_ir.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// DatasetCache is the IR of CacheClient class PreBuiltDatasetCache : public DatasetCacheImpl { @@ -44,5 +44,5 @@ class PreBuiltDatasetCache : public DatasetCacheImpl { Status Build() override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_CACHE_PRE_BUILT_DATASET_CACHE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/batch_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/batch_node.cc index 71ba01cb..9a033bc3 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/batch_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/batch_node.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/batch_op.h" #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { #ifdef ENABLE_PYTHON @@ -182,4 +182,4 @@ Status BatchNode::from_json(nlohmann::json json_obj, std::shared_ptr python_multiprocessing_runtime_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_BATCH_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/bucket_batch_by_length_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/bucket_batch_by_length_node.cc index 
e1816e28..5893cbd8 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/bucket_batch_by_length_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/bucket_batch_by_length_node.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/bucket_batch_by_length_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { BucketBatchByLengthNode::BucketBatchByLengthNode( std::shared_ptr child, const std::vector &column_names, @@ -144,4 +144,4 @@ Status BucketBatchByLengthNode::ValidateParams() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/bucket_batch_by_length_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/bucket_batch_by_length_node.h index 2de9b70a..1b399f44 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/bucket_batch_by_length_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/bucket_batch_by_length_node.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class BucketBatchByLengthNode : public DatasetNode { public: @@ -81,5 +81,5 @@ class BucketBatchByLengthNode : public DatasetNode { bool drop_remainder_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_BUCKET_BATCH_BY_LENGTH_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_sentence_piece_vocab_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_sentence_piece_vocab_node.cc index f2a67717..93bf5732 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_sentence_piece_vocab_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_sentence_piece_vocab_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { BuildSentenceVocabNode::BuildSentenceVocabNode(std::shared_ptr child, std::shared_ptr vocab, @@ -101,4 +101,4 @@ Status BuildSentenceVocabNode::AcceptAfter(IRNodePass *const p, bool *const modi return p->VisitAfter(shared_from_base(), modified); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_sentence_piece_vocab_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_sentence_piece_vocab_node.h index dcabf6fe..017bdfa5 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_sentence_piece_vocab_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_sentence_piece_vocab_node.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" #include "mindspore-lite/minddata/dataset/include/dataset/datasets.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class BuildSentenceVocabNode : public DatasetNode { public: @@ -88,5 +88,5 @@ class BuildSentenceVocabNode : public DatasetNode { std::unordered_map params_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // #ifndef 
MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_BUILD_SENTENCE_PIECE_VOCAB_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_vocab_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_vocab_node.cc index 8dd0c68d..b5f3eded 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_vocab_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_vocab_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { BuildVocabNode::BuildVocabNode(std::shared_ptr child, std::shared_ptr vocab, const std::vector &columns, const std::pair &freq_range, @@ -92,4 +92,4 @@ Status BuildVocabNode::AcceptAfter(IRNodePass *const p, bool *const modified) { return p->VisitAfter(shared_from_base(), modified); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_vocab_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_vocab_node.h index d0a5b7c7..9526737b 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_vocab_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/build_vocab_node.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class BuildVocabNode : public DatasetNode { public: @@ -86,5 +86,5 @@ class BuildVocabNode : public DatasetNode { bool special_first_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_BUILD_VOCAB_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_lookup_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_lookup_node.cc index 8b95fc7a..ce024915 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_lookup_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_lookup_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { CacheLookupNode::CacheLookupNode(std::shared_ptr child, std::shared_ptr sampler, std::shared_ptr cache) @@ -78,4 +78,4 @@ Status CacheLookupNode::SamplerBuild(std::shared_ptr *const out) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_lookup_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_lookup_node.h index 400bcfa3..95bbc950 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_lookup_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_lookup_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/cache_lookup_op.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CacheLookupNode : public DatasetNode, public SamplerObj { public: @@ -86,5 +86,5 @@ class CacheLookupNode : public DatasetNode, public SamplerObj { std::shared_ptr lookup_node_copy_; }; } // namespace dataset -} // namespace mindspore +} // namespace 
mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_CACHE_LOOKUP_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_merge_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_merge_node.cc index 614a4791..39930df0 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_merge_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_merge_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/cache_merge_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { CacheMergeNode::CacheMergeNode(std::shared_ptr child, std::shared_ptr cache) : DatasetNode(std::move(cache)) { @@ -63,4 +63,4 @@ Status CacheMergeNode::AcceptAfter(IRNodePass *const p, bool *const modified) { return p->VisitAfter(shared_from_base(), modified); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_merge_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_merge_node.h index e1b5baa0..3bf4b6b5 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_merge_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_merge_node.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CacheMergeNode : public DatasetNode { public: @@ -66,5 +66,5 @@ class CacheMergeNode : public DatasetNode { Status AcceptAfter(IRNodePass *const p, bool *const modified) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_CACHE_MERGE_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_node.cc index 0d743b32..84e50992 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { CacheNode::CacheNode(std::shared_ptr child, std::shared_ptr sampler, std::shared_ptr cache) @@ -67,4 +67,4 @@ Status CacheNode::AcceptAfter(IRNodePass *const p, bool *const modified) { return p->VisitAfter(shared_from_base(), modified); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_node.h index 92908704..1ee61864 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/cache_node.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CacheNode : public DatasetNode { public: @@ -70,5 +70,5 @@ class CacheNode : public DatasetNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_CACHE_NODE_H_ diff --git 
a/mindspore-lite/minddata/dataset/engine/ir/datasetops/concat_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/concat_node.cc index 5a55c7a5..30cbeb2f 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/concat_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/concat_node.cc @@ -27,7 +27,7 @@ #endif #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Function to build ConcatOp @@ -184,4 +184,4 @@ Status ConcatNode::from_json(nlohmann::json json_obj, std::vector children_sizes_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_CONCAT_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/data_queue_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/data_queue_node.cc index 27d9d14f..a57f713d 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/data_queue_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/data_queue_node.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "utils/ms_context.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for DataQueueNode DataQueueNode::DataQueueNode(std::shared_ptr child, std::string queue_name, std::string device_type, @@ -138,4 +138,4 @@ Status DataQueueNode::from_json(nlohmann::json json_obj, std::shared_ptr child, int32_t num_epochs) : RepeatNode() { @@ -75,4 +75,4 @@ Status EpochCtrlNode::AcceptAfter(IRNodePass *const p, bool *const modified) { return p->VisitAfter(shared_from_base(), modified); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/epoch_ctrl_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/epoch_ctrl_node.h index e28b5ab0..8d50980d 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/epoch_ctrl_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/epoch_ctrl_node.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/repeat_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class EpochCtrlNode : public RepeatNode { // Allow GeneratorNode to access internal members @@ -75,5 +75,5 @@ class EpochCtrlNode : public RepeatNode { Status AcceptAfter(IRNodePass *const p, bool *const modified) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_EPOCH_CTRL_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/filter_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/filter_node.cc index 2e7a6e61..090c55b4 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/filter_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/filter_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for FilterNode FilterNode::FilterNode(std::shared_ptr child, std::shared_ptr predicate, @@ -79,4 +79,4 @@ Status FilterNode::to_json(nlohmann::json *out_json) { 
return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/filter_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/filter_node.h index ae2b3e42..5050c12d 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/filter_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/filter_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class FilterNode : public DatasetNode { public: @@ -83,5 +83,5 @@ class FilterNode : public DatasetNode { std::vector input_columns_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_FILTER_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/map_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/map_node.cc index 5767f924..aaa93823 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/map_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/map_node.cc @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { MapNode::MapNode(std::shared_ptr child, std::vector> operations, @@ -258,4 +258,4 @@ Status MapNode::GetDatasetSize(const std::shared_ptr &size_ge } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/map_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/map_node.h index e798694e..0ade04ee 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/map_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/map_node.h @@ -26,7 +26,7 @@ #endif #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class MapNode : public DatasetNode { public: @@ -134,5 +134,5 @@ class MapNode : public DatasetNode { std::shared_ptr python_multiprocessing_runtime_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_MAP_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/project_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/project_node.cc index dd68ff86..1d0105c7 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/project_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/project_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Function to build ProjectOp ProjectNode::ProjectNode(std::shared_ptr child, const std::vector &columns) @@ -83,4 +83,4 @@ Status ProjectNode::AcceptAfter(IRNodePass *const p, bool *const modified) { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/project_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/project_node.h index 837da38a..06d6c625 100644 --- 
a/mindspore-lite/minddata/dataset/engine/ir/datasetops/project_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/project_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ProjectNode : public DatasetNode { public: @@ -86,5 +86,5 @@ class ProjectNode : public DatasetNode { std::vector columns_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_PROJECT_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/rename_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/rename_node.cc index a9a78e3f..40c646c2 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/rename_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/rename_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Function to build RenameOp @@ -92,4 +92,4 @@ Status RenameNode::AcceptAfter(IRNodePass *const p, bool *const modified) { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/rename_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/rename_node.h index 423fd956..120c322b 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/rename_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/rename_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RenameNode : public DatasetNode { public: @@ -89,5 +89,5 @@ class RenameNode : public DatasetNode { std::vector output_columns_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_RENAME_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/repeat_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/repeat_node.cc index af0e8152..290cc085 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/repeat_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/repeat_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { RepeatNode::RepeatNode(std::shared_ptr child, int32_t count) : op_(nullptr), repeat_count_(count) { this->AddChild(child); @@ -109,4 +109,4 @@ Status RepeatNode::from_json(nlohmann::json json_obj, std::shared_ptr child) : DatasetNode() { @@ -77,4 +77,4 @@ Status RootNode::AcceptAfter(IRNodePass *const p, bool *const modified) { return p->VisitAfter(shared_from_base(), modified); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/root_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/root_node.h index 7d960d05..a91ba965 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/root_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/root_node.h @@ -23,7 +23,7 @@ #include 
"mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RootNode : public DatasetNode { @@ -94,5 +94,5 @@ class RootNode : public DatasetNode { int64_t dataset_size_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_ROOT_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/shuffle_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/shuffle_node.cc index c095777e..9d05b367 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/shuffle_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/shuffle_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for ShuffleNode @@ -89,4 +89,4 @@ Status ShuffleNode::AcceptAfter(IRNodePass *const p, bool *const modified) { return p->VisitAfter(shared_from_base(), modified); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/shuffle_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/shuffle_node.h index 9ffefbf6..bafdb9b7 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/shuffle_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/shuffle_node.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ShuffleNode : public DatasetNode { public: @@ -92,5 +92,5 @@ class ShuffleNode : public DatasetNode { bool reset_every_epoch_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SHUFFLE_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/skip_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/skip_node.cc index 397d93ce..7aaa48e1 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/skip_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/skip_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for SkipNode SkipNode::SkipNode(int32_t count) : skip_count_(count) {} @@ -103,4 +103,4 @@ Status SkipNode::from_json(nlohmann::json json_obj, std::shared_ptr return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/skip_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/skip_node.h index d2447abc..7af0fb66 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/skip_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/skip_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SkipNode : public DatasetNode { public: @@ -107,5 +107,5 @@ class SkipNode : public DatasetNode { bool once_only_ = false; }; } // namespace dataset -} // namespace 
mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SKIP_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/ag_news_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/ag_news_node.cc index 65e15172..ba346909 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/ag_news_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/ag_news_node.cc @@ -1,199 +1,199 @@ -/** - * Copyright 2021-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/ag_news_node.h" - -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/ag_news_op.h" -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" -#include "mindspore-lite/minddata/dataset/util/status.h" - -namespace mindspore { -namespace dataset { -// Constructor for AGNewsNode. -AGNewsNode::AGNewsNode(const std::string &dataset_dir, int64_t num_samples, ShuffleMode shuffle, - const std::string &usage, int32_t num_shards, int32_t shard_id, - const std::shared_ptr &cache) - : NonMappableSourceNode(std::move(cache)), - dataset_dir_(dataset_dir), - num_samples_(num_samples), - shuffle_(shuffle), - num_shards_(num_shards), - shard_id_(shard_id), - usage_(usage), - ag_news_files_list_(WalkAllFiles(usage, dataset_dir)) { - GlobalContext::config_manager()->set_num_shards_for_auto_num_workers(num_shards_); -} - -std::shared_ptr AGNewsNode::Copy() { - auto node = - std::make_shared(dataset_dir_, num_samples_, shuffle_, usage_, num_shards_, shard_id_, cache_); - (void)node->SetNumWorkers(num_workers_); - (void)node->SetConnectorQueueSize(connector_que_size_); - return node; -} - -void AGNewsNode::Print(std::ostream &out) const { - out << (Name() + "(cache: " + ((cache_ != nullptr) ? "true" : "false") + - ", num_shards: " + std::to_string(num_shards_) + ", shard_id: " + std::to_string(shard_id_) + ")"); -} - -Status AGNewsNode::ValidateParams() { - RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); - RETURN_IF_NOT_OK(ValidateDatasetDirParam("AGNewsDataset", dataset_dir_)); - RETURN_IF_NOT_OK(ValidateStringValue("AGNewsDataset", usage_, {"train", "test", "all"})); - RETURN_IF_NOT_OK(ValidateScalar("AGNewsDataset", "num_samples", num_samples_, {0}, false)); - RETURN_IF_NOT_OK(ValidateDatasetShardParams("AGNewsDataset", num_shards_, shard_id_)); - RETURN_IF_NOT_OK(ValidateEnum("AGNewsDataset", "ShuffleMode", shuffle_, - {ShuffleMode::kFalse, ShuffleMode::kFiles, ShuffleMode::kGlobal})); - - if (!column_names_.empty()) { - RETURN_IF_NOT_OK(ValidateDatasetColumnParam("AGNewsDataset", "column_names", column_names_)); - } - return Status::OK(); -} - -// Function to build AGNewsNode. 
-Status AGNewsNode::Build(std::vector> *const node_ops) { - bool shuffle_files = (shuffle_ == ShuffleMode::kGlobal || shuffle_ == ShuffleMode::kFiles); - // Sort the dataset files in a lexicographical order. - std::vector sorted_dataset_files = ag_news_files_list_; - std::sort(sorted_dataset_files.begin(), sorted_dataset_files.end()); - // Because AGNews does not have external column_defaults nor column_names parameters, - // they need to be set before AGNewsOp is initialized. - // AGNews data set is formatted as three columns of data, so three columns are added. - std::vector> column_default; - column_default.push_back(std::make_shared>(AGNewsOp::STRING, "")); - column_default.push_back(std::make_shared>(AGNewsOp::STRING, "")); - column_default.push_back(std::make_shared>(AGNewsOp::STRING, "")); - std::vector column_name = {"index", "title", "description"}; - // AGNews data values are always delimited by a comma. - char field_delim_ = ','; - std::shared_ptr ag_news_op = - std::make_shared(num_workers_, num_samples_, worker_connector_size_, connector_que_size_, shuffle_files, - num_shards_, shard_id_, field_delim_, column_default, column_name, sorted_dataset_files); - RETURN_IF_NOT_OK(ag_news_op->Init()); - if (shuffle_ == ShuffleMode::kGlobal) { - // Inject ShuffleOp. - std::shared_ptr shuffle_op = nullptr; - int64_t num_rows = 0; - // First, get the number of rows in the dataset. - RETURN_IF_NOT_OK(AGNewsOp::CountAllFileRows(ag_news_files_list_, false, &num_rows)); - // Add the shuffle op after this op. - RETURN_IF_NOT_OK( - AddShuffleOp(sorted_dataset_files.size(), num_shards_, num_rows, 0, connector_que_size_, &shuffle_op)); - shuffle_op->SetTotalRepeats(GetTotalRepeats()); - shuffle_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - shuffle_op->Skip(skip_steps_); - node_ops->push_back(shuffle_op); - } - ag_news_op->SetTotalRepeats(GetTotalRepeats()); - ag_news_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - node_ops->push_back(ag_news_op); - return Status::OK(); -} - -// Get the shard id of node. -Status AGNewsNode::GetShardId(int32_t *shard_id) { - *shard_id = shard_id_; - return Status::OK(); -} - -// Get Dataset size. -Status AGNewsNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) { - if (dataset_size_ > 0) { - *dataset_size = dataset_size_; - return Status::OK(); - } - - int64_t num_rows, sample_size; - RETURN_IF_NOT_OK(AGNewsOp::CountAllFileRows(ag_news_files_list_, false, &num_rows)); - sample_size = num_samples_; - num_rows = static_cast(ceil(num_rows / (1.0 * num_shards_))); - *dataset_size = sample_size > 0 ? std::min(num_rows, sample_size) : num_rows; - dataset_size_ = *dataset_size; - return Status::OK(); -} - -Status AGNewsNode::to_json(nlohmann::json *out_json) { - nlohmann::json args; - args["num_parallel_workers"] = num_workers_; - args["connector_queue_size"] = connector_que_size_; - args["dataset_dir"] = dataset_dir_; - args["usage"] = usage_; - args["num_samples"] = num_samples_; - args["shuffle"] = shuffle_; - args["num_shards"] = num_shards_; - args["shard_id"] = shard_id_; - if (cache_ != nullptr) { - nlohmann::json cache_args; - RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); - args["cache"] = cache_args; - } - *out_json = args; - return Status::OK(); -} - -// Note: The following two functions are common among NonMappableSourceNode and -// should be promoted to its parent class. AGNews (for which internally is based off CSV) -// by itself is a non-mappable dataset that does not support sampling. 
-// However, if a cache operator is injected at some other place higher in the tree, -// that cache can inherit this sampler from the leaf, providing sampling support from -// the caching layer. -// Should be promoted to its parent class. -// That is why we setup the sampler for a leaf node that does not use sampling. -Status AGNewsNode::SetupSamplerForCache(std::shared_ptr *sampler) { - *sampler = SelectSampler(num_samples_, shuffle_, num_shards_, shard_id_); - return Status::OK(); -} - -// If a cache has been added into the ascendant tree over this AGNews node, then -// the cache will be executing a sampler for fetching the data. As such, any -// options in the AGNews node need to be reset to its defaults so that this -// AGNews node will produce the full set of data into the cache. -Status AGNewsNode::MakeSimpleProducer() { - shard_id_ = 0; - num_shards_ = 1; - shuffle_ = ShuffleMode::kFalse; - num_samples_ = 0; - return Status::OK(); -} - -std::vector AGNewsNode::WalkAllFiles(const std::string &usage, const std::string &dataset_dir) { - std::vector ag_news_files_list; - Path train_prefix("train.csv"); - Path test_prefix("test.csv"); - Path dir(dataset_dir); - - if (usage == "train") { - Path temp_path = dir / train_prefix; - ag_news_files_list.push_back(temp_path.ToString()); - } else if (usage == "test") { - Path temp_path = dir / test_prefix; - ag_news_files_list.push_back(temp_path.ToString()); - } else { - Path temp_path = dir / train_prefix; - ag_news_files_list.push_back(temp_path.ToString()); - Path temp_path1 = dir / test_prefix; - ag_news_files_list.push_back(temp_path1.ToString()); - } - return ag_news_files_list; -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/ag_news_node.h" + +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/ag_news_op.h" +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" +#include "mindspore-lite/minddata/dataset/util/status.h" + +namespace mindspore::lite { +namespace dataset { +// Constructor for AGNewsNode. 
+AGNewsNode::AGNewsNode(const std::string &dataset_dir, int64_t num_samples, ShuffleMode shuffle, + const std::string &usage, int32_t num_shards, int32_t shard_id, + const std::shared_ptr &cache) + : NonMappableSourceNode(std::move(cache)), + dataset_dir_(dataset_dir), + num_samples_(num_samples), + shuffle_(shuffle), + num_shards_(num_shards), + shard_id_(shard_id), + usage_(usage), + ag_news_files_list_(WalkAllFiles(usage, dataset_dir)) { + GlobalContext::config_manager()->set_num_shards_for_auto_num_workers(num_shards_); +} + +std::shared_ptr AGNewsNode::Copy() { + auto node = + std::make_shared(dataset_dir_, num_samples_, shuffle_, usage_, num_shards_, shard_id_, cache_); + (void)node->SetNumWorkers(num_workers_); + (void)node->SetConnectorQueueSize(connector_que_size_); + return node; +} + +void AGNewsNode::Print(std::ostream &out) const { + out << (Name() + "(cache: " + ((cache_ != nullptr) ? "true" : "false") + + ", num_shards: " + std::to_string(num_shards_) + ", shard_id: " + std::to_string(shard_id_) + ")"); +} + +Status AGNewsNode::ValidateParams() { + RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); + RETURN_IF_NOT_OK(ValidateDatasetDirParam("AGNewsDataset", dataset_dir_)); + RETURN_IF_NOT_OK(ValidateStringValue("AGNewsDataset", usage_, {"train", "test", "all"})); + RETURN_IF_NOT_OK(ValidateScalar("AGNewsDataset", "num_samples", num_samples_, {0}, false)); + RETURN_IF_NOT_OK(ValidateDatasetShardParams("AGNewsDataset", num_shards_, shard_id_)); + RETURN_IF_NOT_OK(ValidateEnum("AGNewsDataset", "ShuffleMode", shuffle_, + {ShuffleMode::kFalse, ShuffleMode::kFiles, ShuffleMode::kGlobal})); + + if (!column_names_.empty()) { + RETURN_IF_NOT_OK(ValidateDatasetColumnParam("AGNewsDataset", "column_names", column_names_)); + } + return Status::OK(); +} + +// Function to build AGNewsNode. +Status AGNewsNode::Build(std::vector> *const node_ops) { + bool shuffle_files = (shuffle_ == ShuffleMode::kGlobal || shuffle_ == ShuffleMode::kFiles); + // Sort the dataset files in a lexicographical order. + std::vector sorted_dataset_files = ag_news_files_list_; + std::sort(sorted_dataset_files.begin(), sorted_dataset_files.end()); + // Because AGNews does not have external column_defaults nor column_names parameters, + // they need to be set before AGNewsOp is initialized. + // AGNews data set is formatted as three columns of data, so three columns are added. + std::vector> column_default; + column_default.push_back(std::make_shared>(AGNewsOp::STRING, "")); + column_default.push_back(std::make_shared>(AGNewsOp::STRING, "")); + column_default.push_back(std::make_shared>(AGNewsOp::STRING, "")); + std::vector column_name = {"index", "title", "description"}; + // AGNews data values are always delimited by a comma. + char field_delim_ = ','; + std::shared_ptr ag_news_op = + std::make_shared(num_workers_, num_samples_, worker_connector_size_, connector_que_size_, shuffle_files, + num_shards_, shard_id_, field_delim_, column_default, column_name, sorted_dataset_files); + RETURN_IF_NOT_OK(ag_news_op->Init()); + if (shuffle_ == ShuffleMode::kGlobal) { + // Inject ShuffleOp. + std::shared_ptr shuffle_op = nullptr; + int64_t num_rows = 0; + // First, get the number of rows in the dataset. + RETURN_IF_NOT_OK(AGNewsOp::CountAllFileRows(ag_news_files_list_, false, &num_rows)); + // Add the shuffle op after this op. 
+ RETURN_IF_NOT_OK( + AddShuffleOp(sorted_dataset_files.size(), num_shards_, num_rows, 0, connector_que_size_, &shuffle_op)); + shuffle_op->SetTotalRepeats(GetTotalRepeats()); + shuffle_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); + shuffle_op->Skip(skip_steps_); + node_ops->push_back(shuffle_op); + } + ag_news_op->SetTotalRepeats(GetTotalRepeats()); + ag_news_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); + node_ops->push_back(ag_news_op); + return Status::OK(); +} + +// Get the shard id of node. +Status AGNewsNode::GetShardId(int32_t *shard_id) { + *shard_id = shard_id_; + return Status::OK(); +} + +// Get Dataset size. +Status AGNewsNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) { + if (dataset_size_ > 0) { + *dataset_size = dataset_size_; + return Status::OK(); + } + + int64_t num_rows, sample_size; + RETURN_IF_NOT_OK(AGNewsOp::CountAllFileRows(ag_news_files_list_, false, &num_rows)); + sample_size = num_samples_; + num_rows = static_cast(ceil(num_rows / (1.0 * num_shards_))); + *dataset_size = sample_size > 0 ? std::min(num_rows, sample_size) : num_rows; + dataset_size_ = *dataset_size; + return Status::OK(); +} + +Status AGNewsNode::to_json(nlohmann::json *out_json) { + nlohmann::json args; + args["num_parallel_workers"] = num_workers_; + args["connector_queue_size"] = connector_que_size_; + args["dataset_dir"] = dataset_dir_; + args["usage"] = usage_; + args["num_samples"] = num_samples_; + args["shuffle"] = shuffle_; + args["num_shards"] = num_shards_; + args["shard_id"] = shard_id_; + if (cache_ != nullptr) { + nlohmann::json cache_args; + RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); + args["cache"] = cache_args; + } + *out_json = args; + return Status::OK(); +} + +// Note: The following two functions are common among NonMappableSourceNode and +// should be promoted to its parent class. AGNews (which is internally based on CSV) +// by itself is a non-mappable dataset that does not support sampling. +// However, if a cache operator is injected at some other place higher in the tree, +// that cache can inherit this sampler from the leaf, providing sampling support from +// the caching layer. +// That is why we set up the sampler for a leaf node that does not use sampling. +Status AGNewsNode::SetupSamplerForCache(std::shared_ptr *sampler) { + *sampler = SelectSampler(num_samples_, shuffle_, num_shards_, shard_id_); + return Status::OK(); +} + +// If a cache has been added into the ascendant tree over this AGNews node, then +// the cache will be executing a sampler for fetching the data. As such, any +// options in the AGNews node need to be reset to its defaults so that this +// AGNews node will produce the full set of data into the cache.
+Status AGNewsNode::MakeSimpleProducer() { + shard_id_ = 0; + num_shards_ = 1; + shuffle_ = ShuffleMode::kFalse; + num_samples_ = 0; + return Status::OK(); +} + +std::vector AGNewsNode::WalkAllFiles(const std::string &usage, const std::string &dataset_dir) { + std::vector ag_news_files_list; + Path train_prefix("train.csv"); + Path test_prefix("test.csv"); + Path dir(dataset_dir); + + if (usage == "train") { + Path temp_path = dir / train_prefix; + ag_news_files_list.push_back(temp_path.ToString()); + } else if (usage == "test") { + Path temp_path = dir / test_prefix; + ag_news_files_list.push_back(temp_path.ToString()); + } else { + Path temp_path = dir / train_prefix; + ag_news_files_list.push_back(temp_path.ToString()); + Path temp_path1 = dir / test_prefix; + ag_news_files_list.push_back(temp_path1.ToString()); + } + return ag_news_files_list; +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/ag_news_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/ag_news_node.h index 41dcdc8e..84683c25 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/ag_news_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/ag_news_node.h @@ -1,126 +1,126 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_AG_NEWS_NODE_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_AG_NEWS_NODE_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" - -namespace mindspore { -namespace dataset { -/// \brief class AGNewsNode. -/// \brief Dataset derived class to represent AGNews dataset. -class AGNewsNode : public NonMappableSourceNode { - public: - /// \brief Constructor. - AGNewsNode(const std::string &dataset_dir, int64_t num_samples, ShuffleMode shuffle, const std::string &usage, - int32_t num_shards, int32_t shard_id, const std::shared_ptr &cache); - - /// \brief Destructor. - ~AGNewsNode() override = default; - - /// \brief Node name getter. - /// \return Name of the current node. - std::string Name() const override { return kAGNewsNode; } - - /// \brief Print the description. - /// \param[in] out The output stream to write output to. - void Print(std::ostream &out) const override; - - /// \brief Copy the node to a new object. - /// \return A shared pointer to the new copy. - std::shared_ptr Copy() override; - - /// \brief A base class override function to create the required runtime dataset op objects for this class. - /// \param[in] node_ops A vector containing shared pointer to the Dataset Ops that this object will create. - /// \return Status Status::OK() if build successfully. - Status Build(std::vector> *const node_ops) override; - - /// \brief Parameters validation. - /// \return Status Status::OK() if all the parameters are valid. 
- Status ValidateParams() override; - - /// \brief Get the shard id of node. - /// \param[in] shard_id The shard id. - /// \return Status Status::OK() if get shard id successfully. - Status GetShardId(int32_t *shard_id) override; - - /// \brief Getter functions. - const std::string &DatasetDir() const { return dataset_dir_; } - const std::string &Usage() const { return usage_; } - int64_t NumSamples() const { return num_samples_; } - ShuffleMode Shuffle() const { return shuffle_; } - int32_t NumShards() const { return num_shards_; } - int32_t ShardId() const { return shard_id_; } - - /// \brief Base-class override for GetDatasetSize. - /// \param[in] size_getter Shared pointer to DatasetSizeGetter. - /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting - /// dataset size at the expense of accuracy. - /// \param[out] dataset_size the size of the dataset. - /// \return Status of the function. - Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) override; - - /// \brief Get the arguments of node - /// \param[out] out_json JSON string of all attributes - /// \return Status of the function - Status to_json(nlohmann::json *out_json) override; - - /// \brief AGNews by itself is a non-mappable dataset that does not support sampling. - /// However, if a cache operator is injected at some other place higher in - /// the tree, that cache can inherit this sampler from the leaf, providing - /// sampling support from the caching layer. That is why we setup the - /// sampler for a leaf node that does not use sampling. Note: This - /// function is common among NonMappableSourceNode and should be promoted - /// to its parent class. - /// \param[in] sampler The sampler to setup. - /// \return Status of the function. - Status SetupSamplerForCache(std::shared_ptr *sampler) override; - - /// \brief If a cache has been added into the ascendant tree over this ag_news node, - /// then the cache will be executing a sampler for fetching the data. - /// As such, any options in the AGNews node need to be reset to its defaults - /// so that this AGNews node will produce the full set of data into the cache. - /// Note: This function is common among NonMappableSourceNode and should be promoted to its - /// parent class. - /// \return Status of the function. - Status MakeSimpleProducer() override; - - /// \brief Generate a list of read file names according to usage. - /// \param[in] usage Part of dataset of AGNews. - /// \param[in] dataset_dir Path to the root directory that contains the dataset. - /// \return std::vector A list of read file names. - std::vector WalkAllFiles(const std::string &usage, const std::string &dataset_dir); - - private: - std::string dataset_dir_; - std::string usage_; - std::vector> column_defaults_; - std::vector column_names_; - int64_t num_samples_; - ShuffleMode shuffle_; - int32_t num_shards_; - int32_t shard_id_; - std::vector ag_news_files_list_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_AG_NEWS_NODE_H_ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_AG_NEWS_NODE_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_AG_NEWS_NODE_H_ + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" + +namespace mindspore::lite { +namespace dataset { +/// \brief class AGNewsNode. +/// \brief Dataset derived class to represent AGNews dataset. +class AGNewsNode : public NonMappableSourceNode { + public: + /// \brief Constructor. + AGNewsNode(const std::string &dataset_dir, int64_t num_samples, ShuffleMode shuffle, const std::string &usage, + int32_t num_shards, int32_t shard_id, const std::shared_ptr &cache); + + /// \brief Destructor. + ~AGNewsNode() override = default; + + /// \brief Node name getter. + /// \return Name of the current node. + std::string Name() const override { return kAGNewsNode; } + + /// \brief Print the description. + /// \param[in] out The output stream to write output to. + void Print(std::ostream &out) const override; + + /// \brief Copy the node to a new object. + /// \return A shared pointer to the new copy. + std::shared_ptr Copy() override; + + /// \brief A base class override function to create the required runtime dataset op objects for this class. + /// \param[in] node_ops A vector containing shared pointer to the Dataset Ops that this object will create. + /// \return Status Status::OK() if build successfully. + Status Build(std::vector> *const node_ops) override; + + /// \brief Parameters validation. + /// \return Status Status::OK() if all the parameters are valid. + Status ValidateParams() override; + + /// \brief Get the shard id of node. + /// \param[in] shard_id The shard id. + /// \return Status Status::OK() if get shard id successfully. + Status GetShardId(int32_t *shard_id) override; + + /// \brief Getter functions. + const std::string &DatasetDir() const { return dataset_dir_; } + const std::string &Usage() const { return usage_; } + int64_t NumSamples() const { return num_samples_; } + ShuffleMode Shuffle() const { return shuffle_; } + int32_t NumShards() const { return num_shards_; } + int32_t ShardId() const { return shard_id_; } + + /// \brief Base-class override for GetDatasetSize. + /// \param[in] size_getter Shared pointer to DatasetSizeGetter. + /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting + /// dataset size at the expense of accuracy. + /// \param[out] dataset_size the size of the dataset. + /// \return Status of the function. + Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) override; + + /// \brief Get the arguments of node + /// \param[out] out_json JSON string of all attributes + /// \return Status of the function + Status to_json(nlohmann::json *out_json) override; + + /// \brief AGNews by itself is a non-mappable dataset that does not support sampling. 
+ /// However, if a cache operator is injected at some other place higher in + /// the tree, that cache can inherit this sampler from the leaf, providing + /// sampling support from the caching layer. That is why we setup the + /// sampler for a leaf node that does not use sampling. Note: This + /// function is common among NonMappableSourceNode and should be promoted + /// to its parent class. + /// \param[in] sampler The sampler to setup. + /// \return Status of the function. + Status SetupSamplerForCache(std::shared_ptr *sampler) override; + + /// \brief If a cache has been added into the ascendant tree over this ag_news node, + /// then the cache will be executing a sampler for fetching the data. + /// As such, any options in the AGNews node need to be reset to its defaults + /// so that this AGNews node will produce the full set of data into the cache. + /// Note: This function is common among NonMappableSourceNode and should be promoted to its + /// parent class. + /// \return Status of the function. + Status MakeSimpleProducer() override; + + /// \brief Generate a list of read file names according to usage. + /// \param[in] usage Part of dataset of AGNews. + /// \param[in] dataset_dir Path to the root directory that contains the dataset. + /// \return std::vector A list of read file names. + std::vector WalkAllFiles(const std::string &usage, const std::string &dataset_dir); + + private: + std::string dataset_dir_; + std::string usage_; + std::vector> column_defaults_; + std::vector column_names_; + int64_t num_samples_; + ShuffleMode shuffle_; + int32_t num_shards_; + int32_t shard_id_; + std::vector ag_news_files_list_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_AG_NEWS_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/album_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/album_node.cc index 694964e6..d8b01c31 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/album_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/album_node.cc @@ -28,7 +28,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for AlbumNode @@ -172,4 +172,4 @@ Status AlbumNode::from_json(nlohmann::json json_obj, std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_ALBUM_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/amazon_review_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/amazon_review_node.cc old mode 100755 new mode 100644 index 951ba96c..47794266 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/amazon_review_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/amazon_review_node.cc @@ -1,195 +1,195 @@ -/** - * Copyright 2021-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/amazon_review_node.h" - -#include -#include - -namespace mindspore { -namespace dataset { -// Constructor for AmazonReviewNode -AmazonReviewNode::AmazonReviewNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, - ShuffleMode shuffle, int32_t num_shards, int32_t shard_id, - const std::shared_ptr &cache) - : NonMappableSourceNode(std::move(cache)), - dataset_dir_(dataset_dir), - num_samples_(num_samples), - shuffle_(shuffle), - num_shards_(num_shards), - shard_id_(shard_id), - usage_(usage), - amazon_review_files_list_(WalkAllFiles(usage, dataset_dir)) { - // Update the num_shards_ in global context. this number is only used for now by auto_num_worker_pass. - // User discretion is advised. Auto_num_worker_pass is currently an experimental feature which can still work - // if the num_shards_ isn't 100% correct. The reason behind is for now, PreBuildSampler doesn't offer a way to - // return num_shards. Once PreBuildSampler is phased out, this can be cleaned up. - GlobalContext::config_manager()->set_num_shards_for_auto_num_workers(num_shards_); -} - -std::shared_ptr AmazonReviewNode::Copy() { - auto node = - std::make_shared(dataset_dir_, usage_, num_samples_, shuffle_, num_shards_, shard_id_, cache_); - (void)node->SetNumWorkers(num_workers_); - (void)node->SetConnectorQueueSize(connector_que_size_); - return node; -} - -void AmazonReviewNode::Print(std::ostream &out) const { - out << (Name() + "(cache: " + ((cache_ != nullptr) ? "true" : "false") + - ", num_shards: " + std::to_string(num_shards_) + ", shard_id: " + std::to_string(shard_id_) + ")"); -} - -Status AmazonReviewNode::ValidateParams() { - RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); - RETURN_IF_NOT_OK(ValidateDatasetDirParam("AmazonReviewDataset", dataset_dir_)); - RETURN_IF_NOT_OK(ValidateStringValue("AmazonReviewDataset", usage_, {"train", "test", "all"})); - RETURN_IF_NOT_OK(ValidateDatasetFilesParam("AmazonReviewDataset", amazon_review_files_list_)); - RETURN_IF_NOT_OK(ValidateScalar("AmazonReviewDataset", "num_samples", num_samples_, {0}, false)); - RETURN_IF_NOT_OK(ValidateEnum("AmazonReviewDataset", "ShuffleMode", shuffle_, - {ShuffleMode::kFalse, ShuffleMode::kFiles, ShuffleMode::kGlobal})); - - RETURN_IF_NOT_OK(ValidateDatasetShardParams("AmazonReviewDataset", num_shards_, shard_id_)); - return Status::OK(); -} - -Status AmazonReviewNode::Build(std::vector> *const node_ops) { - bool shuffle_files = (shuffle_ == ShuffleMode::kGlobal || shuffle_ == ShuffleMode::kFiles); - - // Sort the dataset files in a lexicographical order. 
- std::vector sorted_dataset_files = amazon_review_files_list_; - std::sort(sorted_dataset_files.begin(), sorted_dataset_files.end()); - - std::vector> column_default; - column_default.push_back(std::make_shared>(AmazonReviewOp::STRING, "")); - column_default.push_back(std::make_shared>(AmazonReviewOp::STRING, "")); - column_default.push_back(std::make_shared>(AmazonReviewOp::STRING, "")); - - std::vector column_name = {"label", "title", "content"}; - char field_delim = ','; - std::shared_ptr amazon_review_op = std::make_shared( - num_workers_, num_samples_, worker_connector_size_, connector_que_size_, shuffle_files, num_shards_, shard_id_, - field_delim, column_default, column_name, sorted_dataset_files); - RETURN_IF_NOT_OK(amazon_review_op->Init()); - - // If a global shuffle is used for AmazonReview, it will inject a shuffle op over the AmazonReview. - // But, if there is a cache in the tree, we do not need the global shuffle and the shuffle op should not be - // built.This is achieved in the cache transform pass where we call MakeSimpleProducer to reset AmazonReview's - // shuffle option to false. - if (shuffle_ == ShuffleMode::kGlobal) { - // Inject ShuffleOp. - std::shared_ptr shuffle_op = nullptr; - int64_t num_rows = 0; - - // First, get the number of rows in the dataset. - RETURN_IF_NOT_OK(AmazonReviewOp::CountAllFileRows(sorted_dataset_files, false, &num_rows)); - // Add the shuffle op after this op. - RETURN_IF_NOT_OK( - AddShuffleOp(sorted_dataset_files.size(), num_shards_, num_rows, 0, connector_que_size_, &shuffle_op)); - shuffle_op->SetTotalRepeats(GetTotalRepeats()); - shuffle_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - shuffle_op->Skip(skip_steps_); - node_ops->push_back(shuffle_op); - } - amazon_review_op->SetTotalRepeats(GetTotalRepeats()); - amazon_review_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - node_ops->push_back(amazon_review_op); - return Status::OK(); -} - -Status AmazonReviewNode::GetShardId(int32_t *shard_id) { - *shard_id = shard_id_; - return Status::OK(); -} - -// Get Dataset size -Status AmazonReviewNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) { - if (dataset_size_ > 0) { - *dataset_size = dataset_size_; - return Status::OK(); - } - - int64_t num_rows, sample_size; - RETURN_IF_NOT_OK(AmazonReviewOp::CountAllFileRows(amazon_review_files_list_, false, &num_rows)); - sample_size = num_samples_; - num_rows = static_cast(ceil(num_rows / (1.0 * num_shards_))); - *dataset_size = sample_size > 0 ? std::min(num_rows, sample_size) : num_rows; - dataset_size_ = *dataset_size; - return Status::OK(); -} - -Status AmazonReviewNode::to_json(nlohmann::json *out_json) { - nlohmann::json args; - args["num_parallel_workers"] = num_workers_; - args["connector_queue_size"] = connector_que_size_; - args["dataset_dir"] = dataset_dir_; - args["usage"] = usage_; - args["num_samples"] = num_samples_; - args["shuffle"] = shuffle_; - args["num_shards"] = num_shards_; - args["shard_id"] = shard_id_; - if (cache_ != nullptr) { - nlohmann::json cache_args; - RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); - args["cache"] = cache_args; - } - *out_json = args; - return Status::OK(); -} - -// Note: The following two functions are common among NonMappableSourceNode and should be promoted to its parent -// class. AmazonReview by itself is a non-mappable dataset that does not support sampling. 
However, if a cache -// operator is injected at some other place higher in the tree, that cache can inherit this sampler from the leaf, -// providing sampling support from the caching layer. That is why we setup the sampler for a leaf node that does not -// use sampling. -Status AmazonReviewNode::SetupSamplerForCache(std::shared_ptr *sampler) { - *sampler = SelectSampler(num_samples_, shuffle_, num_shards_, shard_id_); - return Status::OK(); -} - -// If a cache has been added into the ascendant tree over this AmazonReview node, then the cache will be executing -// a sampler for fetching the data. As such, any options in the AmazonReview node need to be reset to its defaults so -// If a cache has been added into the ascendant tree over this AmazonReview node, then the cache will be executing -Status AmazonReviewNode::MakeSimpleProducer() { - shard_id_ = 0; - num_shards_ = 1; - shuffle_ = ShuffleMode::kFalse; - num_samples_ = 0; - return Status::OK(); -} - -std::vector AmazonReviewNode::WalkAllFiles(const std::string &usage, const std::string &dataset_dir) { - std::vector amazon_review_files_list; - Path train_prefix("train.csv"); - Path test_prefix("test.csv"); - Path dir(dataset_dir); - - if (usage == "train") { - Path temp_path = dir / train_prefix; - amazon_review_files_list.push_back(temp_path.ToString()); - } else if (usage == "test") { - Path temp_path = dir / test_prefix; - amazon_review_files_list.push_back(temp_path.ToString()); - } else { - Path temp_path = dir / train_prefix; - amazon_review_files_list.push_back(temp_path.ToString()); - Path temp_path1 = dir / test_prefix; - amazon_review_files_list.push_back(temp_path1.ToString()); - } - return amazon_review_files_list; -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/amazon_review_node.h" + +#include +#include + +namespace mindspore::lite { +namespace dataset { +// Constructor for AmazonReviewNode +AmazonReviewNode::AmazonReviewNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, + ShuffleMode shuffle, int32_t num_shards, int32_t shard_id, + const std::shared_ptr &cache) + : NonMappableSourceNode(std::move(cache)), + dataset_dir_(dataset_dir), + num_samples_(num_samples), + shuffle_(shuffle), + num_shards_(num_shards), + shard_id_(shard_id), + usage_(usage), + amazon_review_files_list_(WalkAllFiles(usage, dataset_dir)) { + // Update the num_shards_ in global context. this number is only used for now by auto_num_worker_pass. + // User discretion is advised. Auto_num_worker_pass is currently an experimental feature which can still work + // if the num_shards_ isn't 100% correct. The reason behind is for now, PreBuildSampler doesn't offer a way to + // return num_shards. Once PreBuildSampler is phased out, this can be cleaned up. 
+ GlobalContext::config_manager()->set_num_shards_for_auto_num_workers(num_shards_); +} + +std::shared_ptr AmazonReviewNode::Copy() { + auto node = + std::make_shared(dataset_dir_, usage_, num_samples_, shuffle_, num_shards_, shard_id_, cache_); + (void)node->SetNumWorkers(num_workers_); + (void)node->SetConnectorQueueSize(connector_que_size_); + return node; +} + +void AmazonReviewNode::Print(std::ostream &out) const { + out << (Name() + "(cache: " + ((cache_ != nullptr) ? "true" : "false") + + ", num_shards: " + std::to_string(num_shards_) + ", shard_id: " + std::to_string(shard_id_) + ")"); +} + +Status AmazonReviewNode::ValidateParams() { + RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); + RETURN_IF_NOT_OK(ValidateDatasetDirParam("AmazonReviewDataset", dataset_dir_)); + RETURN_IF_NOT_OK(ValidateStringValue("AmazonReviewDataset", usage_, {"train", "test", "all"})); + RETURN_IF_NOT_OK(ValidateDatasetFilesParam("AmazonReviewDataset", amazon_review_files_list_)); + RETURN_IF_NOT_OK(ValidateScalar("AmazonReviewDataset", "num_samples", num_samples_, {0}, false)); + RETURN_IF_NOT_OK(ValidateEnum("AmazonReviewDataset", "ShuffleMode", shuffle_, + {ShuffleMode::kFalse, ShuffleMode::kFiles, ShuffleMode::kGlobal})); + + RETURN_IF_NOT_OK(ValidateDatasetShardParams("AmazonReviewDataset", num_shards_, shard_id_)); + return Status::OK(); +} + +Status AmazonReviewNode::Build(std::vector> *const node_ops) { + bool shuffle_files = (shuffle_ == ShuffleMode::kGlobal || shuffle_ == ShuffleMode::kFiles); + + // Sort the dataset files in a lexicographical order. + std::vector sorted_dataset_files = amazon_review_files_list_; + std::sort(sorted_dataset_files.begin(), sorted_dataset_files.end()); + + std::vector> column_default; + column_default.push_back(std::make_shared>(AmazonReviewOp::STRING, "")); + column_default.push_back(std::make_shared>(AmazonReviewOp::STRING, "")); + column_default.push_back(std::make_shared>(AmazonReviewOp::STRING, "")); + + std::vector column_name = {"label", "title", "content"}; + char field_delim = ','; + std::shared_ptr amazon_review_op = std::make_shared( + num_workers_, num_samples_, worker_connector_size_, connector_que_size_, shuffle_files, num_shards_, shard_id_, + field_delim, column_default, column_name, sorted_dataset_files); + RETURN_IF_NOT_OK(amazon_review_op->Init()); + + // If a global shuffle is used for AmazonReview, it will inject a shuffle op over the AmazonReview. + // But, if there is a cache in the tree, we do not need the global shuffle and the shuffle op should not be + // built. This is achieved in the cache transform pass where we call MakeSimpleProducer to reset AmazonReview's + // shuffle option to false. + if (shuffle_ == ShuffleMode::kGlobal) { + // Inject ShuffleOp. + std::shared_ptr shuffle_op = nullptr; + int64_t num_rows = 0; + + // First, get the number of rows in the dataset. + RETURN_IF_NOT_OK(AmazonReviewOp::CountAllFileRows(sorted_dataset_files, false, &num_rows)); + // Add the shuffle op after this op.
+ RETURN_IF_NOT_OK( + AddShuffleOp(sorted_dataset_files.size(), num_shards_, num_rows, 0, connector_que_size_, &shuffle_op)); + shuffle_op->SetTotalRepeats(GetTotalRepeats()); + shuffle_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); + shuffle_op->Skip(skip_steps_); + node_ops->push_back(shuffle_op); + } + amazon_review_op->SetTotalRepeats(GetTotalRepeats()); + amazon_review_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); + node_ops->push_back(amazon_review_op); + return Status::OK(); +} + +Status AmazonReviewNode::GetShardId(int32_t *shard_id) { + *shard_id = shard_id_; + return Status::OK(); +} + +// Get Dataset size +Status AmazonReviewNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) { + if (dataset_size_ > 0) { + *dataset_size = dataset_size_; + return Status::OK(); + } + + int64_t num_rows, sample_size; + RETURN_IF_NOT_OK(AmazonReviewOp::CountAllFileRows(amazon_review_files_list_, false, &num_rows)); + sample_size = num_samples_; + num_rows = static_cast(ceil(num_rows / (1.0 * num_shards_))); + *dataset_size = sample_size > 0 ? std::min(num_rows, sample_size) : num_rows; + dataset_size_ = *dataset_size; + return Status::OK(); +} + +Status AmazonReviewNode::to_json(nlohmann::json *out_json) { + nlohmann::json args; + args["num_parallel_workers"] = num_workers_; + args["connector_queue_size"] = connector_que_size_; + args["dataset_dir"] = dataset_dir_; + args["usage"] = usage_; + args["num_samples"] = num_samples_; + args["shuffle"] = shuffle_; + args["num_shards"] = num_shards_; + args["shard_id"] = shard_id_; + if (cache_ != nullptr) { + nlohmann::json cache_args; + RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); + args["cache"] = cache_args; + } + *out_json = args; + return Status::OK(); +} + +// Note: The following two functions are common among NonMappableSourceNode and should be promoted to its parent +// class. AmazonReview by itself is a non-mappable dataset that does not support sampling. However, if a cache +// operator is injected at some other place higher in the tree, that cache can inherit this sampler from the leaf, +// providing sampling support from the caching layer. That is why we setup the sampler for a leaf node that does not +// use sampling. +Status AmazonReviewNode::SetupSamplerForCache(std::shared_ptr *sampler) { + *sampler = SelectSampler(num_samples_, shuffle_, num_shards_, shard_id_); + return Status::OK(); +} + +// If a cache has been added into the ascendant tree over this AmazonReview node, then the cache will be executing +// a sampler for fetching the data. 
As such, any options in the AmazonReview node need to be reset to its defaults so +// that this AmazonReview node will produce the full set of data into the cache. +Status AmazonReviewNode::MakeSimpleProducer() { + shard_id_ = 0; + num_shards_ = 1; + shuffle_ = ShuffleMode::kFalse; + num_samples_ = 0; + return Status::OK(); +} + +std::vector AmazonReviewNode::WalkAllFiles(const std::string &usage, const std::string &dataset_dir) { + std::vector amazon_review_files_list; + Path train_prefix("train.csv"); + Path test_prefix("test.csv"); + Path dir(dataset_dir); + + if (usage == "train") { + Path temp_path = dir / train_prefix; + amazon_review_files_list.push_back(temp_path.ToString()); + } else if (usage == "test") { + Path temp_path = dir / test_prefix; + amazon_review_files_list.push_back(temp_path.ToString()); + } else { + Path temp_path = dir / train_prefix; + amazon_review_files_list.push_back(temp_path.ToString()); + Path temp_path1 = dir / test_prefix; + amazon_review_files_list.push_back(temp_path1.ToString()); + } + return amazon_review_files_list; +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/amazon_review_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/amazon_review_node.h old mode 100755 new mode 100644 index ca55a17a..868c745c --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/amazon_review_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/amazon_review_node.h @@ -1,120 +1,120 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_AMAZON_REVIEW_NODE_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_AMAZON_REVIEW_NODE_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.h" -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" - -namespace mindspore { -namespace dataset { -class AmazonReviewNode : public NonMappableSourceNode { - public: - /// \brief Constructor. - AmazonReviewNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, ShuffleMode shuffle, - int32_t num_shards, int32_t shard_id, const std::shared_ptr &cache); - - /// \brief Destructor. - ~AmazonReviewNode() override = default; - - /// \brief Node name getter. - /// \return Name of the current node. - std::string Name() const override { return kAmazonReviewNode; } - - /// \brief Print the description. - /// \param[out] out The output stream to write output to. - void Print(std::ostream &out) const override; - - /// \brief Copy the node to a new object. - /// \return A shared pointer to the new copy. - std::shared_ptr Copy() override; - - /// \brief A base class override function to create the required runtime dataset op objects for this class.
- /// \param node_ops A vector containing shared pointer to the Dataset Ops that this object will create. - /// \return Status Status::OK() if build successfully. - Status Build(std::vector> *const node_ops) override; - - /// \brief Parameters validation. - /// \return Status Status::OK() if all the parameters are valid. - Status ValidateParams() override; - - /// \brief Get the shard id of node. - /// \param[in] shard_id The shard id. - /// \return Status Status::OK() if get shard id successfully. - Status GetShardId(int32_t *shard_id) override; - - /// \brief Base-class override for GetDatasetSize. - /// \param[in] size_getter Shared pointer to DatasetSizeGetter. - /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting - /// dataset size at the expense of accuracy. - /// \param[out] dataset_size The size of the dataset. - /// \return Status of the function. - Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) override; - - /// \brief Getter functions - const std::string &DatasetDir() const { return dataset_dir_; } - const std::string &Usage() const { return usage_; } - int64_t NumSamples() const { return num_samples_; } - ShuffleMode Shuffle() const { return shuffle_; } - int32_t NumShards() const { return num_shards_; } - int32_t ShardId() const { return shard_id_; } - - /// \brief Get the arguments of node. - /// \param[out] out_json JSON string of all attributes. - /// \return Status of the function. - Status to_json(nlohmann::json *out_json) override; - - /// \brief AmazonReview by itself is a non-mappable dataset that does not support sampling. - /// However, if a cache operator is injected at some other place higher in the tree, that cache can - /// inherit this sampler from the leaf, providing sampling support from the caching layer. - /// That is why we setup the sampler for a leaf node that does not use sampling. - /// Note: This function is common among NonMappableSourceNode and should be promoted to its parent class. - /// \param[in] sampler The sampler to setup. - /// \return Status of the function. - Status SetupSamplerForCache(std::shared_ptr *sampler) override; - - /// \brief If a cache has been added into the ascendant tree over this AmazonReview node, then the cache will be - /// executing a sampler for fetching the data. As such, any options in the AmazonReview node need to be reset - /// to its defaults so that this AmazonReview node will produce the full set of data into the cache. - /// Note: This function is common among NonMappableSourceNode and should be promoted to its parent class. - /// \return Status of the function. - Status MakeSimpleProducer() override; - - /// \brief Generate a list of read file names according to usage. - /// \param[in] usage Part of dataset of AmazonReview. - /// \param[in] dataset_dir Path to the root directory that contains the dataset. - /// \return std::vector A list of read file names. 
- std::vector WalkAllFiles(const std::string &usage, const std::string &dataset_dir); - - private: - std::string dataset_dir_; - std::string usage_; - std::vector> column_defaults_; - std::vector column_names_; - int64_t num_samples_; - ShuffleMode shuffle_; - int32_t num_shards_; - int32_t shard_id_; - std::vector amazon_review_files_list_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_AMAZON_REVIEW_NODE_H_ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_AMAZON_REVIEW_NODE_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_AMAZON_REVIEW_NODE_H_ + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/amazon_review_op.h" +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" + +namespace mindspore::lite { +namespace dataset { +class AmazonReviewNode : public NonMappableSourceNode { + public: + /// \brief Constructor. + AmazonReviewNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, ShuffleMode shuffle, + int32_t num_shards, int32_t shard_id, const std::shared_ptr &cache); + + /// \brief Destructor. + ~AmazonReviewNode() override = default; + + /// \brief Node name getter. + /// \return Name of the current node. + std::string Name() const override { return kAmazonReviewNode; } + + /// \brief Print the description. + /// \param[out] out The output stream to write output to. + void Print(std::ostream &out) const override; + + /// \brief Copy the node to a new object. + /// \return A shared pointer to the new copy. + std::shared_ptr Copy() override; + + /// \brief A base class override function to create the required runtime dataset op objects for this class. + /// \param node_ops A vector containing shared pointer to the Dataset Ops that this object will create. + /// \return Status Status::OK() if build successfully. + Status Build(std::vector> *const node_ops) override; + + /// \brief Parameters validation. + /// \return Status Status::OK() if all the parameters are valid. + Status ValidateParams() override; + + /// \brief Get the shard id of node. + /// \param[in] shard_id The shard id. + /// \return Status Status::OK() if get shard id successfully. + Status GetShardId(int32_t *shard_id) override; + + /// \brief Base-class override for GetDatasetSize. + /// \param[in] size_getter Shared pointer to DatasetSizeGetter. + /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting + /// dataset size at the expense of accuracy. + /// \param[out] dataset_size The size of the dataset. + /// \return Status of the function. 
+ Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) override; + + /// \brief Getter functions + const std::string &DatasetDir() const { return dataset_dir_; } + const std::string &Usage() const { return usage_; } + int64_t NumSamples() const { return num_samples_; } + ShuffleMode Shuffle() const { return shuffle_; } + int32_t NumShards() const { return num_shards_; } + int32_t ShardId() const { return shard_id_; } + + /// \brief Get the arguments of node. + /// \param[out] out_json JSON string of all attributes. + /// \return Status of the function. + Status to_json(nlohmann::json *out_json) override; + + /// \brief AmazonReview by itself is a non-mappable dataset that does not support sampling. + /// However, if a cache operator is injected at some other place higher in the tree, that cache can + /// inherit this sampler from the leaf, providing sampling support from the caching layer. + /// That is why we setup the sampler for a leaf node that does not use sampling. + /// Note: This function is common among NonMappableSourceNode and should be promoted to its parent class. + /// \param[in] sampler The sampler to setup. + /// \return Status of the function. + Status SetupSamplerForCache(std::shared_ptr *sampler) override; + + /// \brief If a cache has been added into the ascendant tree over this AmazonReview node, then the cache will be + /// executing a sampler for fetching the data. As such, any options in the AmazonReview node need to be reset + /// to its defaults so that this AmazonReview node will produce the full set of data into the cache. + /// Note: This function is common among NonMappableSourceNode and should be promoted to its parent class. + /// \return Status of the function. + Status MakeSimpleProducer() override; + + /// \brief Generate a list of read file names according to usage. + /// \param[in] usage Part of dataset of AmazonReview. + /// \param[in] dataset_dir Path to the root directory that contains the dataset. + /// \return std::vector A list of read file names. 
+ std::vector WalkAllFiles(const std::string &usage, const std::string &dataset_dir); + + private: + std::string dataset_dir_; + std::string usage_; + std::vector> column_defaults_; + std::vector column_names_; + int64_t num_samples_; + ShuffleMode shuffle_; + int32_t num_shards_; + int32_t shard_id_; + std::vector amazon_review_files_list_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_AMAZON_REVIEW_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/caltech256_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/caltech256_node.cc old mode 100755 new mode 100644 index 2334d56b..693c7987 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/caltech256_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/caltech256_node.cc @@ -28,7 +28,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const std::set kExts = {".jpg", ".JPEG"}; @@ -135,4 +135,4 @@ Status Caltech256Node::from_json(nlohmann::json json_obj, std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CALTECH256_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/celeba_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/celeba_node.cc index 0ab569b1..81d5108d 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/celeba_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/celeba_node.cc @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/engine/serdes.h" #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { #ifdef ENABLE_PYTHON @@ -261,4 +261,4 @@ Status CelebANode::from_json(nlohmann::json json_obj, std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CIFAR100_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cifar10_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cifar10_node.cc index 7817625c..4a3b33cf 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cifar10_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cifar10_node.cc @@ -27,7 +27,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for Cifar10Node @@ -145,4 +145,4 @@ Status Cifar10Node::from_json(nlohmann::json json_obj, std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CIFAR10_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cityscapes_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cityscapes_node.cc index 9fcd3940..f5e8cbb6 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cityscapes_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cityscapes_node.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/cityscapes_op.h" #include 
"mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for CityscapesNode CityscapesNode::CityscapesNode(const std::string &dataset_dir, const std::string &usage, @@ -144,4 +144,4 @@ Status CityscapesNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cityscapes_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cityscapes_node.h index 290c2246..be8d0b62 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cityscapes_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cityscapes_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CityscapesNode : public MappableSourceNode { public: @@ -105,5 +105,5 @@ class CityscapesNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CITYSCAPES_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/clue_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/clue_node.cc index 3f29e6bf..6ad2692e 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/clue_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/clue_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/clue_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for CLUENode @@ -288,4 +288,4 @@ Status CLUENode::MakeSimpleProducer() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/clue_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/clue_node.h index 78a20da9..7914c75f 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/clue_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/clue_node.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class CLUENode /// \brief A Dataset derived class to represent CLUE dataset @@ -145,5 +145,5 @@ class CLUENode : public NonMappableSourceNode { int32_t shard_id_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CLUE_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cmu_arctic_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cmu_arctic_node.cc index be43a0ac..c34e4779 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cmu_arctic_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cmu_arctic_node.cc @@ -1,116 +1,116 @@ -/** - * Copyright 2022-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cmu_arctic_node.h" - -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/cmu_arctic_op.h" -#include "mindspore-lite/minddata/dataset/util/status.h" - -namespace mindspore { -namespace dataset { -CMUArcticNode::CMUArcticNode(const std::string &dataset_dir, const std::string &name, - std::shared_ptr sampler, std::shared_ptr cache) - : MappableSourceNode(std::move(cache)), dataset_dir_(dataset_dir), name_(name), sampler_(sampler) {} - -void CMUArcticNode::Print(std::ostream &out) const { out << Name(); } - -std::shared_ptr CMUArcticNode::Copy() { - std::shared_ptr sampler = (sampler_ == nullptr) ? nullptr : sampler_->SamplerCopy(); - auto node = std::make_shared(dataset_dir_, name_, sampler, cache_); - (void)node->SetNumWorkers(num_workers_); - (void)node->SetConnectorQueueSize(connector_que_size_); - return node; -} - -Status CMUArcticNode::ValidateParams() { - RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); - RETURN_IF_NOT_OK(ValidateDatasetDirParam("CMUArcticDataset", dataset_dir_)); - RETURN_IF_NOT_OK(ValidateDatasetSampler("CMUArcticDataset", sampler_)); - RETURN_IF_NOT_OK(ValidateStringValue("CMUArcticDataset", name_, - {"aew", "ahw", "aup", "awb", "axb", "bdl", "clb", "eey", "fem", "gka", "jmk", - "ksp", "ljm", "lnh", "rms", "rxr", "slp", "slt"})); - return Status::OK(); -} - -Status CMUArcticNode::Build(std::vector> *const node_ops) { - auto schema = std::make_unique(); - - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT32), TensorImpl::kCv, 1))); - TensorShape scalar_rate = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("sample_rate", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_rate))); - TensorShape scalar_utterance = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("transcript", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_utterance))); - TensorShape scalar_utterance_id = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("utterance_id", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_utterance_id))); - - std::shared_ptr sampler_rt = nullptr; - RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); - - auto op = std::make_shared(dataset_dir_, name_, num_workers_, connector_que_size_, std::move(schema), - std::move(sampler_rt)); - op->SetTotalRepeats(GetTotalRepeats()); - op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - node_ops->push_back(op); - - return Status::OK(); -} - -Status CMUArcticNode::GetShardId(int32_t *shard_id) { - *shard_id = sampler_->ShardId(); - return Status::OK(); -} - -Status CMUArcticNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) { - if (dataset_size_ > 0) { - *dataset_size = dataset_size_; - return Status::OK(); - } - int64_t num_rows, sample_size; - RETURN_IF_NOT_OK(CMUArcticOp::CountTotalRows(dataset_dir_, name_, &num_rows)); - std::shared_ptr sampler_rt = nullptr; - 
RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); - sample_size = sampler_rt->CalculateNumSamples(num_rows); - if (sample_size == -1) { - RETURN_IF_NOT_OK(size_getter->DryRun(shared_from_this(), &sample_size)); - } - *dataset_size = sample_size; - dataset_size_ = *dataset_size; - return Status::OK(); -} - -Status CMUArcticNode::to_json(nlohmann::json *out_json) { - nlohmann::json args, sampler_args; - RETURN_IF_NOT_OK(sampler_->to_json(&sampler_args)); - args["sampler"] = sampler_args; - args["num_parallel_workers"] = num_workers_; - args["connector_queue_size"] = connector_que_size_; - args["dataset_dir"] = dataset_dir_; - args["name"] = name_; - if (cache_ != nullptr) { - nlohmann::json cache_args; - RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); - args["cache"] = cache_args; - } - *out_json = args; - return Status::OK(); -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2022-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cmu_arctic_node.h" + +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/cmu_arctic_op.h" +#include "mindspore-lite/minddata/dataset/util/status.h" + +namespace mindspore::lite { +namespace dataset { +CMUArcticNode::CMUArcticNode(const std::string &dataset_dir, const std::string &name, + std::shared_ptr sampler, std::shared_ptr cache) + : MappableSourceNode(std::move(cache)), dataset_dir_(dataset_dir), name_(name), sampler_(sampler) {} + +void CMUArcticNode::Print(std::ostream &out) const { out << Name(); } + +std::shared_ptr CMUArcticNode::Copy() { + std::shared_ptr sampler = (sampler_ == nullptr) ? 
nullptr : sampler_->SamplerCopy(); + auto node = std::make_shared(dataset_dir_, name_, sampler, cache_); + (void)node->SetNumWorkers(num_workers_); + (void)node->SetConnectorQueueSize(connector_que_size_); + return node; +} + +Status CMUArcticNode::ValidateParams() { + RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); + RETURN_IF_NOT_OK(ValidateDatasetDirParam("CMUArcticDataset", dataset_dir_)); + RETURN_IF_NOT_OK(ValidateDatasetSampler("CMUArcticDataset", sampler_)); + RETURN_IF_NOT_OK(ValidateStringValue("CMUArcticDataset", name_, + {"aew", "ahw", "aup", "awb", "axb", "bdl", "clb", "eey", "fem", "gka", "jmk", + "ksp", "ljm", "lnh", "rms", "rxr", "slp", "slt"})); + return Status::OK(); +} + +Status CMUArcticNode::Build(std::vector> *const node_ops) { + auto schema = std::make_unique(); + + RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT32), TensorImpl::kCv, 1))); + TensorShape scalar_rate = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("sample_rate", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_rate))); + TensorShape scalar_utterance = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("transcript", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_utterance))); + TensorShape scalar_utterance_id = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("utterance_id", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_utterance_id))); + + std::shared_ptr sampler_rt = nullptr; + RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); + + auto op = std::make_shared(dataset_dir_, name_, num_workers_, connector_que_size_, std::move(schema), + std::move(sampler_rt)); + op->SetTotalRepeats(GetTotalRepeats()); + op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); + node_ops->push_back(op); + + return Status::OK(); +} + +Status CMUArcticNode::GetShardId(int32_t *shard_id) { + *shard_id = sampler_->ShardId(); + return Status::OK(); +} + +Status CMUArcticNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) { + if (dataset_size_ > 0) { + *dataset_size = dataset_size_; + return Status::OK(); + } + int64_t num_rows, sample_size; + RETURN_IF_NOT_OK(CMUArcticOp::CountTotalRows(dataset_dir_, name_, &num_rows)); + std::shared_ptr sampler_rt = nullptr; + RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); + sample_size = sampler_rt->CalculateNumSamples(num_rows); + if (sample_size == -1) { + RETURN_IF_NOT_OK(size_getter->DryRun(shared_from_this(), &sample_size)); + } + *dataset_size = sample_size; + dataset_size_ = *dataset_size; + return Status::OK(); +} + +Status CMUArcticNode::to_json(nlohmann::json *out_json) { + nlohmann::json args, sampler_args; + RETURN_IF_NOT_OK(sampler_->to_json(&sampler_args)); + args["sampler"] = sampler_args; + args["num_parallel_workers"] = num_workers_; + args["connector_queue_size"] = connector_que_size_; + args["dataset_dir"] = dataset_dir_; + args["name"] = name_; + if (cache_ != nullptr) { + nlohmann::json cache_args; + RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); + args["cache"] = cache_args; + } + *out_json = args; + return Status::OK(); +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cmu_arctic_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cmu_arctic_node.h index 5179bcb0..c1039e14 100644 --- 
a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cmu_arctic_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/cmu_arctic_node.h @@ -1,95 +1,95 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CMU_ARCTIC_NODE_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CMU_ARCTIC_NODE_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" - -namespace mindspore { -namespace dataset { -class CMUArcticNode : public MappableSourceNode { - public: - /// \brief Constructor. - CMUArcticNode(const std::string &dataset_dir, const std::string &name, std::shared_ptr sampler, - std::shared_ptr cache); - - /// \brief Destructor. - ~CMUArcticNode() = default; - - /// \brief Node name getter. - /// \return Name of the current node. - std::string Name() const override { return kCMUArcticNode; } - - /// \brief Print the description. - /// \param out The output stream to write output to. - void Print(std::ostream &out) const override; - - /// \brief Copy the node to a new object. - /// \return A shared pointer to the new copy. - std::shared_ptr Copy() override; - - /// \brief a base class override function to create the required runtime dataset op objects for this class. - /// \param node_ops A vector containing shared pointer to the Dataset Ops that this object will create. - /// \return Status Status::OK() if build successfully. - Status Build(std::vector> *const node_ops) override; - - /// \brief Parameters validation. - /// \return Status Status::OK() if all the parameters are valid. - Status ValidateParams() override; - - /// \brief Get the shard id of node. - /// \param[in] shard_id The shard ID within num_shards. - /// \return Status Status::OK() if get shard id successfully. - Status GetShardId(int32_t *shard_id) override; - - /// \brief Base-class override for GetDatasetSize. - /// \param[in] size_getter Shared pointer to DatasetSizeGetter. - /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting - /// dataset size at the expense of accuracy. - /// \param[out] dataset_size the size of the dataset. - /// \return Status of the function. - Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) override; - - /// \brief Getter functions. - const std::string &DatasetDir() const { return dataset_dir_; } - const std::string &GetName() const { return name_; } - - /// \brief Get the arguments of node. - /// \param[out] out_json JSON string of all attributes. - /// \return Status of the function. - Status to_json(nlohmann::json *out_json) override; - - /// \brief Sampler getter. - /// \return SamplerObj of the current node. - std::shared_ptr Sampler() override { return sampler_; } - - /// \brief Sampler setter. 
- /// \param[in] sampler Tells CMUArcticOp what to read. - void SetSampler(std::shared_ptr sampler) override { sampler_ = sampler; } - - private: - std::string dataset_dir_; - std::string name_; - std::shared_ptr sampler_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CMU_ARCTIC_NODE_H_ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CMU_ARCTIC_NODE_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CMU_ARCTIC_NODE_H_ + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" + +namespace mindspore::lite { +namespace dataset { +class CMUArcticNode : public MappableSourceNode { + public: + /// \brief Constructor. + CMUArcticNode(const std::string &dataset_dir, const std::string &name, std::shared_ptr sampler, + std::shared_ptr cache); + + /// \brief Destructor. + ~CMUArcticNode() = default; + + /// \brief Node name getter. + /// \return Name of the current node. + std::string Name() const override { return kCMUArcticNode; } + + /// \brief Print the description. + /// \param out The output stream to write output to. + void Print(std::ostream &out) const override; + + /// \brief Copy the node to a new object. + /// \return A shared pointer to the new copy. + std::shared_ptr Copy() override; + + /// \brief a base class override function to create the required runtime dataset op objects for this class. + /// \param node_ops A vector containing shared pointer to the Dataset Ops that this object will create. + /// \return Status Status::OK() if build successfully. + Status Build(std::vector> *const node_ops) override; + + /// \brief Parameters validation. + /// \return Status Status::OK() if all the parameters are valid. + Status ValidateParams() override; + + /// \brief Get the shard id of node. + /// \param[in] shard_id The shard ID within num_shards. + /// \return Status Status::OK() if get shard id successfully. + Status GetShardId(int32_t *shard_id) override; + + /// \brief Base-class override for GetDatasetSize. + /// \param[in] size_getter Shared pointer to DatasetSizeGetter. + /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting + /// dataset size at the expense of accuracy. + /// \param[out] dataset_size the size of the dataset. + /// \return Status of the function. + Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) override; + + /// \brief Getter functions. + const std::string &DatasetDir() const { return dataset_dir_; } + const std::string &GetName() const { return name_; } + + /// \brief Get the arguments of node. + /// \param[out] out_json JSON string of all attributes. + /// \return Status of the function. 
+ Status to_json(nlohmann::json *out_json) override; + + /// \brief Sampler getter. + /// \return SamplerObj of the current node. + std::shared_ptr Sampler() override { return sampler_; } + + /// \brief Sampler setter. + /// \param[in] sampler Tells CMUArcticOp what to read. + void SetSampler(std::shared_ptr sampler) override { sampler_ = sampler; } + + private: + std::string dataset_dir_; + std::string name_; + std::shared_ptr sampler_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CMU_ARCTIC_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/coco_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/coco_node.cc index 83bd6a09..1b53ef0f 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/coco_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/coco_node.cc @@ -27,7 +27,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { #ifdef ENABLE_PYTHON @@ -248,4 +248,4 @@ Status CocoNode::from_json(nlohmann::json json_obj, std::shared_ptr } #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/coco_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/coco_node.h index fa1933a5..259dc62e 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/coco_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/coco_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CocoNode : public MappableSourceNode { public: @@ -115,5 +115,5 @@ class CocoNode : public MappableSourceNode { #endif }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_COCO_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/conll2000_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/conll2000_node.cc index bfb2bde4..67183072 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/conll2000_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/conll2000_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/conll2000_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for CoNLL2000Node. 
CoNLL2000Node::CoNLL2000Node(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, @@ -203,4 +203,4 @@ std::vector CoNLL2000Node::WalkAllFiles(const std::string &usage, c return conll2000_file_list; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/conll2000_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/conll2000_node.h index ba1f8ef3..be1c44d6 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/conll2000_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/conll2000_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class CoNLL2000Node. /// \brief A Dataset derived class to represent CoNLL2000 dataset. @@ -126,5 +126,5 @@ class CoNLL2000Node : public NonMappableSourceNode { std::vector conll2000_file_list_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CONLL2000_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/csv_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/csv_node.cc index 000b3045..32a259f7 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/csv_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/csv_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/csv_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for CSVNode @@ -231,4 +231,4 @@ Status CSVNode::MakeSimpleProducer() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/csv_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/csv_node.h index c2a13df6..f4a564e4 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/csv_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/csv_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Record type for CSV enum CsvType : uint8_t { INT = 0, FLOAT, STRING }; @@ -146,5 +146,5 @@ class CSVNode : public NonMappableSourceNode { int32_t shard_id_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_CSV_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/dbpedia_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/dbpedia_node.cc index c00b1011..81877723 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/dbpedia_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/dbpedia_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/util/path.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { DBpediaNode::DBpediaNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, ShuffleMode shuffle, int32_t num_shards, int32_t shard_id, 
std::shared_ptr cache) @@ -206,4 +206,4 @@ Status DBpediaNode::MakeSimpleProducer() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/dbpedia_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/dbpedia_node.h index 599ee554..de9d6032 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/dbpedia_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/dbpedia_node.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/dbpedia_op.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DBpediaNode : public NonMappableSourceNode { public: @@ -116,5 +116,5 @@ class DBpediaNode : public NonMappableSourceNode { int32_t shard_id_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_DBPEDIA_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/div2k_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/div2k_node.cc index 333e1c62..37073329 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/div2k_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/div2k_node.cc @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/div2k_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for DIV2KNode DIV2KNode::DIV2KNode(const std::string &dataset_dir, const std::string &usage, const std::string &downgrade, @@ -153,4 +153,4 @@ Status DIV2KNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/div2k_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/div2k_node.h index c588e2fa..72ca3d6b 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/div2k_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/div2k_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DIV2KNode : public MappableSourceNode { public: @@ -104,5 +104,5 @@ class DIV2KNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_DIV2K_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/emnist_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/emnist_node.cc index 731ec0b4..d5bf40ad 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/emnist_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/emnist_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/emnist_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { EMnistNode::EMnistNode(const std::string &dataset_dir, const std::string &name, const std::string &usage, 
std::shared_ptr sampler, std::shared_ptr cache) @@ -117,4 +117,4 @@ Status EMnistNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/emnist_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/emnist_node.h index 7680ccb3..c3622ec7 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/emnist_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/emnist_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class EMnistNode : public MappableSourceNode { public: @@ -106,5 +106,5 @@ class EMnistNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_EMNIST_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/en_wik9_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/en_wik9_node.cc index b546476a..336d7a98 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/en_wik9_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/en_wik9_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/en_wik9_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for EnWik9Node EnWik9Node::EnWik9Node(const std::string &dataset_dir, int32_t num_samples, ShuffleMode shuffle, int32_t num_shards, @@ -171,4 +171,4 @@ Status EnWik9Node::MakeSimpleProducer() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/en_wik9_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/en_wik9_node.h index 0e399748..40bd9dc0 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/en_wik9_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/en_wik9_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class EnWik9Node. /// \brief A Dataset derived class to represent EnWik9 dataset. 
@@ -132,5 +132,5 @@ class EnWik9Node : public NonMappableSourceNode { std::vector src_target_file_list_; // file list; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_EN_WIK9_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fake_image_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fake_image_node.cc index aa99fb03..09ec2c5a 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fake_image_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fake_image_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/fake_image_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { FakeImageNode::FakeImageNode(int32_t num_images, const std::vector &image_size, int32_t num_classes, int32_t base_seed, std::shared_ptr sampler, @@ -132,4 +132,4 @@ Status FakeImageNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fake_image_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fake_image_node.h index 1814c9b3..74820762 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fake_image_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fake_image_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class FakeImageNode : public MappableSourceNode { public: @@ -95,5 +95,5 @@ class FakeImageNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_FAKE_IMAGE_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fashion_mnist_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fashion_mnist_node.cc index e1f0f00d..377a772d 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fashion_mnist_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fashion_mnist_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/fashion_mnist_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { FashionMnistNode::FashionMnistNode(const std::string &dataset_dir, const std::string &usage, std::shared_ptr sampler, std::shared_ptr cache) @@ -111,4 +111,4 @@ Status FashionMnistNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fashion_mnist_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fashion_mnist_node.h index 1861cb49..79965f91 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fashion_mnist_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/fashion_mnist_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace 
mindspore::lite { namespace dataset { class FashionMnistNode : public MappableSourceNode { public: @@ -90,5 +90,5 @@ class FashionMnistNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_FASHION_MNIST_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/flickr_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/flickr_node.cc index d52223fc..c37e196c 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/flickr_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/flickr_node.cc @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for FlickrNode FlickrNode::FlickrNode(const std::string &dataset_dir, const std::string &annotation_file, bool decode, @@ -167,4 +167,4 @@ Status FlickrNode::from_json(nlohmann::json json_obj, std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_FLICKR_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/food101_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/food101_node.cc index 98e1bc12..67e46ef2 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/food101_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/food101_node.cc @@ -27,7 +27,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Food101Node::Food101Node(const std::string &dataset_dir, const std::string &usage, bool decode, const std::shared_ptr &sampler, std::shared_ptr cache) @@ -149,4 +149,4 @@ Status Food101Node::from_json(nlohmann::json json_obj, std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_FOOD101_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/generator_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/generator_node.cc index d33a1999..764d320b 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/generator_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/generator_node.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { GeneratorNode::GeneratorNode(const py::function &generator_function, const std::vector &column_names, const std::vector &column_types, int64_t source_len, @@ -179,4 +179,4 @@ Status GeneratorNode::GetDatasetSize(const std::shared_ptr &s } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/generator_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/generator_node.h index 9731b3f1..7dd538d9 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/generator_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/generator_node.h @@ -30,7 +30,7 @@ #include 
"mindspore-lite/minddata/dataset/engine/ir/datasetops/repeat_node.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class GeneratorNode /// \brief A Dataset derived class to represent GeneratorNode dataset @@ -140,5 +140,5 @@ class GeneratorNode : public MappableSourceNode { Status AcceptAfter(IRNodePass *p, bool *const modified) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_GENERATOR_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/gtzan_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/gtzan_node.cc index 2d9c2475..cb7d5470 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/gtzan_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/gtzan_node.cc @@ -1,110 +1,110 @@ -/** - * Copyright 2022-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/gtzan_node.h" - -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/gtzan_op.h" -#include "mindspore-lite/minddata/dataset/util/status.h" - -namespace mindspore { -namespace dataset { -GTZANNode::GTZANNode(const std::string &dataset_dir, const std::string &usage, std::shared_ptr sampler, - std::shared_ptr cache) - : MappableSourceNode(std::move(cache)), dataset_dir_(dataset_dir), usage_(usage), sampler_(sampler) {} - -void GTZANNode::Print(std::ostream &out) const { out << Name(); } - -std::shared_ptr GTZANNode::Copy() { - std::shared_ptr sampler = (sampler_ == nullptr) ? nullptr : sampler_->SamplerCopy(); - auto node = std::make_shared(dataset_dir_, usage_, sampler, cache_); - (void)node->SetNumWorkers(num_workers_); - (void)node->SetConnectorQueueSize(connector_que_size_); - return node; -} - -Status GTZANNode::ValidateParams() { - RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); - RETURN_IF_NOT_OK(ValidateDatasetDirParam("GTZANDataset", dataset_dir_)); - RETURN_IF_NOT_OK(ValidateDatasetSampler("GTZANDataset", sampler_)); - RETURN_IF_NOT_OK(ValidateStringValue("GTZANDataset", usage_, {"train", "valid", "test", "all"})); - return Status::OK(); -} - -Status GTZANNode::Build(std::vector> *const node_ops) { - // Do internal Schema generation. 
- auto schema = std::make_unique(); - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT64), TensorImpl::kCv, 1))); - TensorShape scalar_rate = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("sample_rate", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_rate))); - TensorShape scalar_label = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK( - schema->AddColumn(ColDescriptor("label", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_label))); - std::shared_ptr sampler_rt = nullptr; - RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); - auto op = std::make_shared(usage_, num_workers_, dataset_dir_, connector_que_size_, std::move(schema), - std::move(sampler_rt)); - op->SetTotalRepeats(GetTotalRepeats()); - op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - node_ops->push_back(op); - return Status::OK(); -} - -// Get the shard id of node. -Status GTZANNode::GetShardId(int32_t *shard_id) { - *shard_id = sampler_->ShardId(); - return Status::OK(); -} - -// Get Dataset size. -Status GTZANNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) { - if (dataset_size_ > 0) { - *dataset_size = dataset_size_; - return Status::OK(); - } - int64_t num_rows, sample_size; - RETURN_IF_NOT_OK(GTZANOp::CountTotalRows(dataset_dir_, usage_, &num_rows)); - std::shared_ptr sampler_rt = nullptr; - RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); - sample_size = sampler_rt->CalculateNumSamples(num_rows); - if (sample_size == -1) { - RETURN_IF_NOT_OK(size_getter->DryRun(shared_from_this(), &sample_size)); - } - *dataset_size = sample_size; - dataset_size_ = *dataset_size; - return Status::OK(); -} - -Status GTZANNode::to_json(nlohmann::json *out_json) { - nlohmann::json args, sampler_args; - RETURN_IF_NOT_OK(sampler_->to_json(&sampler_args)); - args["sampler"] = sampler_args; - args["num_parallel_workers"] = num_workers_; - args["connector_queue_size"] = connector_que_size_; - args["dataset_dir"] = dataset_dir_; - args["usage"] = usage_; - if (cache_ != nullptr) { - nlohmann::json cache_args; - RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); - args["cache"] = cache_args; - } - *out_json = args; - return Status::OK(); -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2022-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/gtzan_node.h" + +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/gtzan_op.h" +#include "mindspore-lite/minddata/dataset/util/status.h" + +namespace mindspore::lite { +namespace dataset { +GTZANNode::GTZANNode(const std::string &dataset_dir, const std::string &usage, std::shared_ptr sampler, + std::shared_ptr cache) + : MappableSourceNode(std::move(cache)), dataset_dir_(dataset_dir), usage_(usage), sampler_(sampler) {} + +void GTZANNode::Print(std::ostream &out) const { out << Name(); } + +std::shared_ptr GTZANNode::Copy() { + std::shared_ptr sampler = (sampler_ == nullptr) ? nullptr : sampler_->SamplerCopy(); + auto node = std::make_shared(dataset_dir_, usage_, sampler, cache_); + (void)node->SetNumWorkers(num_workers_); + (void)node->SetConnectorQueueSize(connector_que_size_); + return node; +} + +Status GTZANNode::ValidateParams() { + RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); + RETURN_IF_NOT_OK(ValidateDatasetDirParam("GTZANDataset", dataset_dir_)); + RETURN_IF_NOT_OK(ValidateDatasetSampler("GTZANDataset", sampler_)); + RETURN_IF_NOT_OK(ValidateStringValue("GTZANDataset", usage_, {"train", "valid", "test", "all"})); + return Status::OK(); +} + +Status GTZANNode::Build(std::vector> *const node_ops) { + // Do internal Schema generation. + auto schema = std::make_unique(); + RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT64), TensorImpl::kCv, 1))); + TensorShape scalar_rate = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("sample_rate", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_rate))); + TensorShape scalar_label = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK( + schema->AddColumn(ColDescriptor("label", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_label))); + std::shared_ptr sampler_rt = nullptr; + RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); + auto op = std::make_shared(usage_, num_workers_, dataset_dir_, connector_que_size_, std::move(schema), + std::move(sampler_rt)); + op->SetTotalRepeats(GetTotalRepeats()); + op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); + node_ops->push_back(op); + return Status::OK(); +} + +// Get the shard id of node. +Status GTZANNode::GetShardId(int32_t *shard_id) { + *shard_id = sampler_->ShardId(); + return Status::OK(); +} + +// Get Dataset size. 
+Status GTZANNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) { + if (dataset_size_ > 0) { + *dataset_size = dataset_size_; + return Status::OK(); + } + int64_t num_rows, sample_size; + RETURN_IF_NOT_OK(GTZANOp::CountTotalRows(dataset_dir_, usage_, &num_rows)); + std::shared_ptr sampler_rt = nullptr; + RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); + sample_size = sampler_rt->CalculateNumSamples(num_rows); + if (sample_size == -1) { + RETURN_IF_NOT_OK(size_getter->DryRun(shared_from_this(), &sample_size)); + } + *dataset_size = sample_size; + dataset_size_ = *dataset_size; + return Status::OK(); +} + +Status GTZANNode::to_json(nlohmann::json *out_json) { + nlohmann::json args, sampler_args; + RETURN_IF_NOT_OK(sampler_->to_json(&sampler_args)); + args["sampler"] = sampler_args; + args["num_parallel_workers"] = num_workers_; + args["connector_queue_size"] = connector_que_size_; + args["dataset_dir"] = dataset_dir_; + args["usage"] = usage_; + if (cache_ != nullptr) { + nlohmann::json cache_args; + RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); + args["cache"] = cache_args; + } + *out_json = args; + return Status::OK(); +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/gtzan_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/gtzan_node.h index 1f239b73..e065694c 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/gtzan_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/gtzan_node.h @@ -1,95 +1,95 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_GTZAN_NODE_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_GTZAN_NODE_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" - -namespace mindspore { -namespace dataset { -class GTZANNode : public MappableSourceNode { - public: - /// \brief Constructor - GTZANNode(const std::string &dataset_dir, const std::string &usage, std::shared_ptr sampler, - std::shared_ptr cache); - - /// \brief Destructor - ~GTZANNode() = default; - - /// \brief Node name getter. - /// \return Name of the current node. - std::string Name() const override { return "kGTZANNode"; } - - /// \brief Print the description. - /// \param out The output stream to write output to. - void Print(std::ostream &out) const override; - - /// \brief Copy the node to a new object. - /// \return A shared pointer to the new copy. - std::shared_ptr Copy() override; - - /// \brief a base class override function to create the required runtime dataset op objects for this class. - /// \param node_ops A vector containing shared pointer to the Dataset Ops that this object will create. - /// \return Status Status::OK() if build successfully. 
- Status Build(std::vector> *const node_ops) override; - - /// \brief Parameters validation. - /// \return Status Status::OK() if all the parameters are valid. - Status ValidateParams() override; - - /// \brief Get the shard id of node. - /// \param[in] shard_id The shard ID within num_shards. - /// \return Status Status::OK() if get shard id successfully. - Status GetShardId(int32_t *shard_id) override; - - /// \brief Base-class override for GetDatasetSize. - /// \param[in] size_getter Shared pointer to DatasetSizeGetter. - /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting - /// dataset size at the expense of accuracy. - /// \param[out] dataset_size the size of the dataset. - /// \return Status of the function. - Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) override; - - /// \brief Getter functions. - const std::string &DatasetDir() const { return dataset_dir_; } - const std::string &Usage() const { return usage_; } - - /// \brief Get the arguments of node. - /// \param[out] out_json JSON string of all attributes. - /// \return Status of the function. - Status to_json(nlohmann::json *out_json) override; - - /// \brief Sampler getter. - /// \return SamplerObj of the current node. - std::shared_ptr Sampler() override { return sampler_; } - - /// \brief Sampler setter. - /// \param[in] sampler Tells GTZANOp what to read. - void SetSampler(std::shared_ptr sampler) override { sampler_ = sampler; } - - private: - std::string dataset_dir_; - std::string usage_; - std::shared_ptr sampler_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_GTZAN_NODE_H_ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_GTZAN_NODE_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_GTZAN_NODE_H_ + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" + +namespace mindspore::lite { +namespace dataset { +class GTZANNode : public MappableSourceNode { + public: + /// \brief Constructor + GTZANNode(const std::string &dataset_dir, const std::string &usage, std::shared_ptr sampler, + std::shared_ptr cache); + + /// \brief Destructor + ~GTZANNode() = default; + + /// \brief Node name getter. + /// \return Name of the current node. + std::string Name() const override { return "kGTZANNode"; } + + /// \brief Print the description. + /// \param out The output stream to write output to. + void Print(std::ostream &out) const override; + + /// \brief Copy the node to a new object. + /// \return A shared pointer to the new copy. + std::shared_ptr Copy() override; + + /// \brief a base class override function to create the required runtime dataset op objects for this class. 
+ /// \param node_ops A vector containing shared pointer to the Dataset Ops that this object will create. + /// \return Status Status::OK() if build successfully. + Status Build(std::vector> *const node_ops) override; + + /// \brief Parameters validation. + /// \return Status Status::OK() if all the parameters are valid. + Status ValidateParams() override; + + /// \brief Get the shard id of node. + /// \param[in] shard_id The shard ID within num_shards. + /// \return Status Status::OK() if get shard id successfully. + Status GetShardId(int32_t *shard_id) override; + + /// \brief Base-class override for GetDatasetSize. + /// \param[in] size_getter Shared pointer to DatasetSizeGetter. + /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting + /// dataset size at the expense of accuracy. + /// \param[out] dataset_size the size of the dataset. + /// \return Status of the function. + Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) override; + + /// \brief Getter functions. + const std::string &DatasetDir() const { return dataset_dir_; } + const std::string &Usage() const { return usage_; } + + /// \brief Get the arguments of node. + /// \param[out] out_json JSON string of all attributes. + /// \return Status of the function. + Status to_json(nlohmann::json *out_json) override; + + /// \brief Sampler getter. + /// \return SamplerObj of the current node. + std::shared_ptr Sampler() override { return sampler_; } + + /// \brief Sampler setter. + /// \param[in] sampler Tells GTZANOp what to read. + void SetSampler(std::shared_ptr sampler) override { sampler_ = sampler; } + + private: + std::string dataset_dir_; + std::string usage_; + std::shared_ptr sampler_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_GTZAN_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/image_folder_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/image_folder_node.cc index 3758f31a..ee6ba080 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/image_folder_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/image_folder_node.cc @@ -29,7 +29,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { #ifdef ENABLE_PYTHON @@ -190,4 +190,4 @@ Status ImageFolderNode::from_json(nlohmann::json json_obj, std::shared_ptr sampler, std::shared_ptr cache = nullptr) @@ -140,4 +140,4 @@ Status IMDBNode::from_json(nlohmann::json json_obj, std::shared_ptr } #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/imdb_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/imdb_node.h index 6676a8c1..221f5fe4 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/imdb_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/imdb_node.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class IMDBNode /// \brief A Dataset derived class to represent IMDB dataset @@ -109,5 +109,5 @@ class IMDBNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // 
namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_IMDB_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2016_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2016_node.cc index 2e4ff050..d9b5dae9 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2016_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2016_node.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/iwslt_op.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for IWSLT2016Node. IWSLT2016Node::IWSLT2016Node(const std::string &dataset_dir, const std::string &usage, @@ -190,4 +190,4 @@ Status IWSLT2016Node::MakeSimpleProducer() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2016_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2016_node.h index 9d6ec94a..ebd93660 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2016_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2016_node.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class IWSLT2016Node. /// \brief A Node derived class to represent IWSLT2016Node. @@ -133,5 +133,5 @@ class IWSLT2016Node : public NonMappableSourceNode { std::map> support_language_pair_map_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_IWSLT2016_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2017_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2017_node.cc index 790a7c5c..bd1c6632 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2017_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2017_node.cc @@ -22,7 +22,7 @@ #include "include/common/debug/common.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/source/iwslt_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for IWSLT2017Node. IWSLT2017Node::IWSLT2017Node(const std::string &dataset_dir, const std::string &usage, @@ -181,4 +181,4 @@ Status IWSLT2017Node::MakeSimpleProducer() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2017_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2017_node.h index a9000bdb..fa25b5ec 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2017_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/iwslt2017_node.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class IWSLT2017Node. /// \brief A Node derived class to represent IWSLT2017Node. 
@@ -129,5 +129,5 @@ class IWSLT2017Node : public NonMappableSourceNode { std::map> support_language_pair_map_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_IWSLT2017_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kitti_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kitti_node.cc index d7f60f50..8f7f5c6e 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kitti_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kitti_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/kitti_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for KITTINode KITTINode::KITTINode(const std::string &dataset_dir, const std::string &usage, bool decode, @@ -139,4 +139,4 @@ Status KITTINode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kitti_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kitti_node.h index cef9dfeb..5a6475bc 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kitti_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kitti_node.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class KITTINode : public MappableSourceNode { public: @@ -100,5 +100,5 @@ class KITTINode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_KITTI_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kmnist_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kmnist_node.cc index d0bb88ac..2b711696 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kmnist_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kmnist_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/kmnist_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { KMnistNode::KMnistNode(const std::string &dataset_dir, const std::string &usage, std::shared_ptr sampler, std::shared_ptr cache) @@ -111,4 +111,4 @@ Status KMnistNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kmnist_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kmnist_node.h index aa5ff565..3a54cb9c 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kmnist_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/kmnist_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class KMnistNode : public MappableSourceNode { public: @@ -97,5 +97,5 @@ class KMnistNode : public MappableSourceNode { std::shared_ptr 
sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_KMNIST_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lfw_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lfw_node.cc index 2c012bb5..49755517 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lfw_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lfw_node.cc @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/engine/serdes.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { LFWNode::LFWNode(const std::string &dataset_dir, const std::string &task, const std::string &usage, const std::string &image_set, bool decode, const std::shared_ptr &sampler, @@ -162,4 +162,4 @@ Status LFWNode::from_json(nlohmann::json json_obj, std::shared_ptr } #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lfw_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lfw_node.h index 1de38a68..55b255e3 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lfw_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lfw_node.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class LFWNode /// \brief A Dataset derived class to represent LFW dataset. @@ -119,5 +119,5 @@ class LFWNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_LFW_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/libri_tts_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/libri_tts_node.cc index 2b97bfb8..83778d53 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/libri_tts_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/libri_tts_node.cc @@ -1,121 +1,121 @@ -/** - * Copyright 2022-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/libri_tts_node.h" - -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/libri_tts_op.h" -#include "mindspore-lite/minddata/dataset/util/status.h" - -namespace mindspore { -namespace dataset { -LibriTTSNode::LibriTTSNode(const std::string &dataset_dir, const std::string &usage, - std::shared_ptr sampler, std::shared_ptr cache) - : MappableSourceNode(std::move(cache)), dataset_dir_(dataset_dir), usage_(usage), sampler_(sampler) {} - -void LibriTTSNode::Print(std::ostream &out) const { out << Name(); } - -std::shared_ptr LibriTTSNode::Copy() { - std::shared_ptr sampler = (sampler_ == nullptr) ? nullptr : sampler_->SamplerCopy(); - auto node = std::make_shared(dataset_dir_, usage_, sampler, cache_); - (void)node->SetNumWorkers(num_workers_); - (void)node->SetConnectorQueueSize(connector_que_size_); - return node; -} - -Status LibriTTSNode::ValidateParams() { - RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); - RETURN_IF_NOT_OK(ValidateDatasetDirParam("LibriTTSDataset", dataset_dir_)); - RETURN_IF_NOT_OK(ValidateDatasetSampler("LibriTTSDataset", sampler_)); - RETURN_IF_NOT_OK(ValidateStringValue("LibriTTSDataset", usage_, - {"dev-clean", "dev-other", "test-clean", "test-other", "train-clean-100", - "train-clean-360", "train-other-500", "all"})); - return Status::OK(); -} - -Status LibriTTSNode::GetShardId(int32_t *shard_id) { - *shard_id = sampler_->ShardId(); - return Status::OK(); -} - -Status LibriTTSNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) { - if (dataset_size_ > 0) { - *dataset_size = dataset_size_; - return Status::OK(); - } - int64_t num_rows, sample_size; - RETURN_IF_NOT_OK(LibriTTSOp::CountTotalRows(dataset_dir_, usage_, &num_rows)); - std::shared_ptr sampler_rt = nullptr; - RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); - sample_size = sampler_rt->CalculateNumSamples(num_rows); - if (sample_size == -1) { - RETURN_IF_NOT_OK(size_getter->DryRun(shared_from_this(), &sample_size)); - } - *dataset_size = sample_size; - dataset_size_ = *dataset_size; - return Status::OK(); -} - -Status LibriTTSNode::Build(std::vector> *const node_ops) { - auto schema = std::make_unique(); - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT32), TensorImpl::kCv, 1))); - TensorShape scalar_rate = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("sample_rate", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_rate))); - TensorShape scalar_original_text = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("original_text", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_original_text))); - TensorShape scalar_normalized_text = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("normalized_text", DataType(DataType::DE_STRING), - TensorImpl::kFlexible, 0, &scalar_normalized_text))); - TensorShape scalar_speaker_id = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("speaker_id", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_speaker_id))); - TensorShape scalar_chapter_id = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("chapter_id", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_chapter_id))); - TensorShape scalar_utterance_id = TensorShape::CreateScalar(); - 
RETURN_IF_NOT_OK(schema->AddColumn( - ColDescriptor("utterance_id", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_utterance_id))); - std::shared_ptr sampler_rt = nullptr; - RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); - auto op = std::make_shared(dataset_dir_, usage_, num_workers_, connector_que_size_, std::move(schema), - std::move(sampler_rt)); - op->SetTotalRepeats(GetTotalRepeats()); - op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - node_ops->push_back(op); - return Status::OK(); -} - -Status LibriTTSNode::to_json(nlohmann::json *out_json) { - nlohmann::json args, sampler_args; - RETURN_IF_NOT_OK(sampler_->to_json(&sampler_args)); - args["sampler"] = sampler_args; - args["num_parallel_workers"] = num_workers_; - args["connector_queue_size"] = connector_que_size_; - args["dataset_dir"] = dataset_dir_; - args["usage"] = usage_; - if (cache_ != nullptr) { - nlohmann::json cache_args; - RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); - args["cache"] = cache_args; - } - *out_json = args; - return Status::OK(); -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2022-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/libri_tts_node.h" + +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/libri_tts_op.h" +#include "mindspore-lite/minddata/dataset/util/status.h" + +namespace mindspore::lite { +namespace dataset { +LibriTTSNode::LibriTTSNode(const std::string &dataset_dir, const std::string &usage, + std::shared_ptr sampler, std::shared_ptr cache) + : MappableSourceNode(std::move(cache)), dataset_dir_(dataset_dir), usage_(usage), sampler_(sampler) {} + +void LibriTTSNode::Print(std::ostream &out) const { out << Name(); } + +std::shared_ptr LibriTTSNode::Copy() { + std::shared_ptr sampler = (sampler_ == nullptr) ? 
nullptr : sampler_->SamplerCopy(); + auto node = std::make_shared(dataset_dir_, usage_, sampler, cache_); + (void)node->SetNumWorkers(num_workers_); + (void)node->SetConnectorQueueSize(connector_que_size_); + return node; +} + +Status LibriTTSNode::ValidateParams() { + RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); + RETURN_IF_NOT_OK(ValidateDatasetDirParam("LibriTTSDataset", dataset_dir_)); + RETURN_IF_NOT_OK(ValidateDatasetSampler("LibriTTSDataset", sampler_)); + RETURN_IF_NOT_OK(ValidateStringValue("LibriTTSDataset", usage_, + {"dev-clean", "dev-other", "test-clean", "test-other", "train-clean-100", + "train-clean-360", "train-other-500", "all"})); + return Status::OK(); +} + +Status LibriTTSNode::GetShardId(int32_t *shard_id) { + *shard_id = sampler_->ShardId(); + return Status::OK(); +} + +Status LibriTTSNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) { + if (dataset_size_ > 0) { + *dataset_size = dataset_size_; + return Status::OK(); + } + int64_t num_rows, sample_size; + RETURN_IF_NOT_OK(LibriTTSOp::CountTotalRows(dataset_dir_, usage_, &num_rows)); + std::shared_ptr sampler_rt = nullptr; + RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); + sample_size = sampler_rt->CalculateNumSamples(num_rows); + if (sample_size == -1) { + RETURN_IF_NOT_OK(size_getter->DryRun(shared_from_this(), &sample_size)); + } + *dataset_size = sample_size; + dataset_size_ = *dataset_size; + return Status::OK(); +} + +Status LibriTTSNode::Build(std::vector> *const node_ops) { + auto schema = std::make_unique(); + RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("waveform", DataType(DataType::DE_FLOAT32), TensorImpl::kCv, 1))); + TensorShape scalar_rate = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("sample_rate", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_rate))); + TensorShape scalar_original_text = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("original_text", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_original_text))); + TensorShape scalar_normalized_text = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("normalized_text", DataType(DataType::DE_STRING), + TensorImpl::kFlexible, 0, &scalar_normalized_text))); + TensorShape scalar_speaker_id = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("speaker_id", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_speaker_id))); + TensorShape scalar_chapter_id = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("chapter_id", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar_chapter_id))); + TensorShape scalar_utterance_id = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK(schema->AddColumn( + ColDescriptor("utterance_id", DataType(DataType::DE_STRING), TensorImpl::kFlexible, 0, &scalar_utterance_id))); + std::shared_ptr sampler_rt = nullptr; + RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); + auto op = std::make_shared(dataset_dir_, usage_, num_workers_, connector_que_size_, std::move(schema), + std::move(sampler_rt)); + op->SetTotalRepeats(GetTotalRepeats()); + op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); + node_ops->push_back(op); + return Status::OK(); +} + +Status LibriTTSNode::to_json(nlohmann::json *out_json) { + nlohmann::json args, sampler_args; + RETURN_IF_NOT_OK(sampler_->to_json(&sampler_args)); + args["sampler"] = sampler_args; + 
args["num_parallel_workers"] = num_workers_; + args["connector_queue_size"] = connector_que_size_; + args["dataset_dir"] = dataset_dir_; + args["usage"] = usage_; + if (cache_ != nullptr) { + nlohmann::json cache_args; + RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); + args["cache"] = cache_args; + } + *out_json = args; + return Status::OK(); +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/libri_tts_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/libri_tts_node.h index 28f8fcb2..db498586 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/libri_tts_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/libri_tts_node.h @@ -1,95 +1,95 @@ -/** - * Copyright 2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_LIBRI_TTS_NODE_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_LIBRI_TTS_NODE_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" - -namespace mindspore { -namespace dataset { -class LibriTTSNode : public MappableSourceNode { - public: - /// \brief Constructor. - LibriTTSNode(const std::string &dataset_dir, const std::string &usage, std::shared_ptr sampler, - std::shared_ptr cache); - - /// \brief Destructor. - ~LibriTTSNode() = default; - - /// \brief Node name getter. - /// \return Name of the current node. - std::string Name() const override { return kLibriTTSNode; } - - /// \brief Print the description. - /// \param out The output stream to write output to. - void Print(std::ostream &out) const override; - - /// \brief Copy the node to a new object. - /// \return A shared pointer to the new copy. - std::shared_ptr Copy() override; - - /// \brief a base class override function to create the required runtime dataset op objects for this class. - /// \param node_ops A vector containing shared pointer to the Dataset Ops that this object will create. - /// \return Status Status::OK() if build successfully. - Status Build(std::vector> *const node_ops) override; - - /// \brief Parameters validation. - /// \return Status Status::OK() if all the parameters are valid. - Status ValidateParams() override; - - /// \brief Get the shard id of node. - /// \param[in] shard_id The shard ID within num_shards. - /// \return Status Status::OK() if get shard id successfully. - Status GetShardId(int32_t *shard_id) override; - - /// \brief Base-class override for GetDatasetSize. - /// \param[in] size_getter Shared pointer to DatasetSizeGetter. - /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting - /// dataset size at the expense of accuracy. - /// \param[out] dataset_size the size of the dataset. - /// \return Status of the function. 
- Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) override; - - /// \brief Getter functions. - const std::string &DatasetDir() const { return dataset_dir_; } - const std::string &usage() const { return usage_; } - - /// \brief Get the arguments of node. - /// \param[out] out_json JSON string of all attributes. - /// \return Status of the function. - Status to_json(nlohmann::json *out_json) override; - - /// \brief Sampler getter. - /// \return SamplerObj of the current node. - std::shared_ptr Sampler() override { return sampler_; } - - /// \brief Sampler setter. - /// \param[in] sampler Tells LibriTTSOp what to read. - void SetSampler(std::shared_ptr sampler) override { sampler_ = sampler; } - - private: - std::string dataset_dir_; - std::string usage_; - std::shared_ptr sampler_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_LIBRI_TTS_NODE_H_ +/** + * Copyright 2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_LIBRI_TTS_NODE_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_LIBRI_TTS_NODE_H_ + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" + +namespace mindspore::lite { +namespace dataset { +class LibriTTSNode : public MappableSourceNode { + public: + /// \brief Constructor. + LibriTTSNode(const std::string &dataset_dir, const std::string &usage, std::shared_ptr sampler, + std::shared_ptr cache); + + /// \brief Destructor. + ~LibriTTSNode() = default; + + /// \brief Node name getter. + /// \return Name of the current node. + std::string Name() const override { return kLibriTTSNode; } + + /// \brief Print the description. + /// \param out The output stream to write output to. + void Print(std::ostream &out) const override; + + /// \brief Copy the node to a new object. + /// \return A shared pointer to the new copy. + std::shared_ptr Copy() override; + + /// \brief a base class override function to create the required runtime dataset op objects for this class. + /// \param node_ops A vector containing shared pointer to the Dataset Ops that this object will create. + /// \return Status Status::OK() if build successfully. + Status Build(std::vector> *const node_ops) override; + + /// \brief Parameters validation. + /// \return Status Status::OK() if all the parameters are valid. + Status ValidateParams() override; + + /// \brief Get the shard id of node. + /// \param[in] shard_id The shard ID within num_shards. + /// \return Status Status::OK() if get shard id successfully. + Status GetShardId(int32_t *shard_id) override; + + /// \brief Base-class override for GetDatasetSize. + /// \param[in] size_getter Shared pointer to DatasetSizeGetter. 
+ /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting + /// dataset size at the expense of accuracy. + /// \param[out] dataset_size the size of the dataset. + /// \return Status of the function. + Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) override; + + /// \brief Getter functions. + const std::string &DatasetDir() const { return dataset_dir_; } + const std::string &usage() const { return usage_; } + + /// \brief Get the arguments of node. + /// \param[out] out_json JSON string of all attributes. + /// \return Status of the function. + Status to_json(nlohmann::json *out_json) override; + + /// \brief Sampler getter. + /// \return SamplerObj of the current node. + std::shared_ptr Sampler() override { return sampler_; } + + /// \brief Sampler setter. + /// \param[in] sampler Tells LibriTTSOp what to read. + void SetSampler(std::shared_ptr sampler) override { sampler_ = sampler; } + + private: + std::string dataset_dir_; + std::string usage_; + std::shared_ptr sampler_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_LIBRI_TTS_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lj_speech_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lj_speech_node.cc index 63d17ad3..4f37fa51 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lj_speech_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lj_speech_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/lj_speech_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for LJSpeechNode. LJSpeechNode::LJSpeechNode(const std::string &dataset_dir, std::shared_ptr sampler, @@ -117,4 +117,4 @@ Status LJSpeechNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lj_speech_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lj_speech_node.h index ab5504f9..062c5366 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lj_speech_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lj_speech_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Read LJSpeech dataset. 
class LJSpeechNode : public MappableSourceNode { @@ -91,5 +91,5 @@ class LJSpeechNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_LJ_SPEECH_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lsun_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lsun_node.cc index ae296bfc..ff60bb40 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lsun_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lsun_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/lsun_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { LSUNNode::LSUNNode(const std::string &dataset_dir, const std::string &usage, const std::vector &classes, bool decode, std::shared_ptr sampler, std::shared_ptr cache = nullptr) @@ -122,4 +122,4 @@ Status LSUNNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lsun_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lsun_node.h index f6424d4a..e5537fd6 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lsun_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/lsun_node.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class LSUNNode /// \brief A Dataset derived class to represent LSUN dataset @@ -107,5 +107,5 @@ class LSUNNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_LSUN_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/manifest_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/manifest_node.cc index 360ae9b5..4023e3ac 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/manifest_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/manifest_node.cc @@ -28,7 +28,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { ManifestNode::ManifestNode(const std::string &dataset_file, const std::string &usage, @@ -186,4 +186,4 @@ Status ManifestNode::from_json(nlohmann::json json_obj, std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_MANIFEST_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/minddata_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/minddata_node.cc index b754f430..02eb8e48 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/minddata_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/minddata_node.cc @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include 
"mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { MindDataNode::MindDataNode(const std::vector &dataset_files, const std::vector &columns_list, @@ -260,4 +260,4 @@ Status MindDataNode::AcceptAfter(IRNodePass *const p, bool *const modified) { return p->VisitAfter(shared_from_base(), modified); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/minddata_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/minddata_node.h index 5b99471f..0446df47 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/minddata_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/minddata_node.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/mindrecord_op.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class MindDataNode : public MappableSourceNode { public: @@ -120,5 +120,5 @@ class MindDataNode : public MappableSourceNode { ShuffleMode shuffle_mode_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_MINDDATA_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/mnist_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/mnist_node.cc index cc7e9f6e..1aa4912a 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/mnist_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/mnist_node.cc @@ -27,7 +27,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { MnistNode::MnistNode(std::string dataset_dir, std::string usage, std::shared_ptr sampler, @@ -137,4 +137,4 @@ Status MnistNode::from_json(nlohmann::json json_obj, std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_MNIST_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/multi30k_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/multi30k_node.cc index ed0e11db..eca87543 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/multi30k_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/multi30k_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/multi30k_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Multi30kNode::Multi30kNode(const std::string &dataset_dir, const std::string &usage, const std::vector &language_pair, int32_t num_samples, ShuffleMode shuffle, @@ -196,4 +196,4 @@ std::vector Multi30kNode::WalkAllFiles(const std::string &usage, co return multi30k_files_list; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/multi30k_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/multi30k_node.h index 42217dfc..7c2aad1f 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/multi30k_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/multi30k_node.h @@ -23,7 +23,7 @@ #include 
"mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class Multi30kNode : public NonMappableSourceNode { public: @@ -130,5 +130,5 @@ class Multi30kNode : public NonMappableSourceNode { std::vector multi30k_files_list_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_MULTI30K_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/omniglot_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/omniglot_node.cc index 7af9e38f..5ec0456a 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/omniglot_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/omniglot_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/omniglot_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { OmniglotNode::OmniglotNode(const std::string &dataset_dir, bool background, bool decode, const std::shared_ptr &sampler, const std::shared_ptr &cache) @@ -122,4 +122,4 @@ Status OmniglotNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/omniglot_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/omniglot_node.h index ab87cee8..8e980c93 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/omniglot_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/omniglot_node.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/cache/dataset_cache.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class OmniglotNode /// \brief A Dataset derived class to represent Omniglot dataset. @@ -99,5 +99,5 @@ class OmniglotNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_OMNIGLOT_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/penn_treebank_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/penn_treebank_node.cc index 79018092..6cf2e873 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/penn_treebank_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/penn_treebank_node.cc @@ -1,200 +1,200 @@ -/** - * Copyright 2021-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/penn_treebank_node.h" - -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/penn_treebank_op.h" -#include "mindspore-lite/minddata/dataset/util/status.h" - -namespace mindspore { -namespace dataset { -// Constructor for PennTreebankNode. -PennTreebankNode::PennTreebankNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, - ShuffleMode shuffle, int32_t num_shards, int32_t shard_id, - const std::shared_ptr &cache) - : NonMappableSourceNode(std::move(cache)), - dataset_dir_(dataset_dir), - usage_(usage), - num_samples_(num_samples), - shuffle_(shuffle), - num_shards_(num_shards), - shard_id_(shard_id), - penn_treebank_files_list_(WalkAllFiles(usage, dataset_dir)) { - // Update the num_shards_ in global context. this number is only used for now by auto_num_worker_pass. User discretion - // is advised. Auto_num_worker_pass is currently an experimental feature which can still work if the num_shards_ isn't - // 100% correct. The reason behind is for now, PreBuildSampler doesn't offer a way to return num_shards. Once - // PreBuildSampler is phased out, this can be cleaned up. - GlobalContext::config_manager()->set_num_shards_for_auto_num_workers(num_shards_); -} - -std::shared_ptr PennTreebankNode::Copy() { - auto node = - std::make_shared(dataset_dir_, usage_, num_samples_, shuffle_, num_shards_, shard_id_, cache_); - (void)node->SetNumWorkers(num_workers_); - (void)node->SetConnectorQueueSize(connector_que_size_); - return node; -} - -void PennTreebankNode::Print(std::ostream &out) const { - out << (Name() + "(cache: " + ((cache_ != nullptr) ? "true" : "false") + - ", num_shards: " + std::to_string(num_shards_) + ", shard_id: " + std::to_string(shard_id_) + ")"); -} - -Status PennTreebankNode::ValidateParams() { - RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); - RETURN_IF_NOT_OK(ValidateDatasetDirParam("PennTreebankNode", dataset_dir_)); - RETURN_IF_NOT_OK(ValidateStringValue("PennTreebankNode", usage_, {"train", "test", "valid", "all"})); - RETURN_IF_NOT_OK(ValidateEnum("PennTreebankNode", "ShuffleMode", shuffle_, - {ShuffleMode::kFalse, ShuffleMode::kFiles, ShuffleMode::kGlobal})); - if (num_samples_ < 0) { - std::string err_msg = "PennTreebankNode: Invalid number of samples: " + std::to_string(num_samples_); - LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg); - } - RETURN_IF_NOT_OK(ValidateDatasetShardParams("PennTreebankNode", num_shards_, shard_id_)); - return Status::OK(); -} - -// Function to build PennTreebankNode. -Status PennTreebankNode::Build(std::vector> *const node_ops) { - bool shuffle_files = (shuffle_ == ShuffleMode::kGlobal || shuffle_ == ShuffleMode::kFiles); - // Sort the dataset files in a lexicographical order. - std::vector sorted_dataset_files = penn_treebank_files_list_; - std::sort(sorted_dataset_files.begin(), sorted_dataset_files.end()); - // Do internal Schema generation. - auto schema = std::make_unique(); - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("text", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1))); - // Create and initialize PennTreebankNode. - std::shared_ptr penn_treebank_op = - std::make_shared(num_workers_, num_samples_, worker_connector_size_, std::move(schema), - sorted_dataset_files, connector_que_size_, shuffle_files, num_shards_, shard_id_); - RETURN_IF_NOT_OK(penn_treebank_op->Init()); - // If a global shuffle is used for PennTreebank, it will inject a shuffle op over the PennTreebank. 
- // But, if there is a cache in the tree, we do not need the global shuffle and the shuffle op should not be built. - // This is achieved in the cache transform pass where we call MakeSimpleProducer to reset PennTreebank's shuffle - // option to false. - if (shuffle_ == ShuffleMode::kGlobal) { - // Inject ShuffleOp. - std::shared_ptr shuffle_op = nullptr; - int64_t num_rows = 0; - // First, get the number of rows in the dataset. - RETURN_IF_NOT_OK(PennTreebankOp::CountAllFileRows(penn_treebank_files_list_, &num_rows)); - // Add the shuffle op after this op. - RETURN_IF_NOT_OK( - AddShuffleOp(sorted_dataset_files.size(), num_shards_, num_rows, 0, connector_que_size_, &shuffle_op)); - shuffle_op->SetTotalRepeats(GetTotalRepeats()); - shuffle_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - shuffle_op->Skip(skip_steps_); - node_ops->push_back(shuffle_op); - } - penn_treebank_op->SetTotalRepeats(GetTotalRepeats()); - penn_treebank_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - // Add PennTreebankNode. - node_ops->push_back(penn_treebank_op); - return Status::OK(); -} - -// Get the shard id of node. -Status PennTreebankNode::GetShardId(int32_t *shard_id) { - *shard_id = shard_id_; - return Status::OK(); -} - -// Get Dataset size. -Status PennTreebankNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) { - if (dataset_size_ > 0) { - *dataset_size = dataset_size_; - return Status::OK(); - } - int64_t num_rows, sample_size = num_samples_; - RETURN_IF_NOT_OK(PennTreebankOp::CountAllFileRows(penn_treebank_files_list_, &num_rows)); - num_rows = static_cast(ceil(num_rows / (1.0 * num_shards_))); - *dataset_size = sample_size > 0 ? std::min(num_rows, sample_size) : num_rows; - dataset_size_ = *dataset_size; - return Status::OK(); -} - -Status PennTreebankNode::to_json(nlohmann::json *out_json) { - nlohmann::json args; - args["num_parallel_workers"] = num_workers_; - args["connector_queue_size"] = connector_que_size_; - args["dataset_dir"] = dataset_dir_; - args["usage"] = usage_; - args["num_samples"] = num_samples_; - args["shuffle"] = shuffle_; - args["num_shards"] = num_shards_; - args["shard_id"] = shard_id_; - if (cache_ != nullptr) { - nlohmann::json cache_args; - RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); - args["cache"] = cache_args; - } - *out_json = args; - return Status::OK(); -} - -// Note: The following two functions are common among NonMappableSourceNode and should be promoted to its parent class. -// PennTreebank by itself is a non-mappable dataset that does not support sampling. -// However, if a cache operator is injected at some other place higher in the tree, that cache can -// inherit this sampler from the leaf, providing sampling support from the caching layer. -// That is why we setup the sampler for a leaf node that does not use sampling. -Status PennTreebankNode::SetupSamplerForCache(std::shared_ptr *sampler) { - *sampler = SelectSampler(num_samples_, shuffle_, num_shards_, shard_id_); - return Status::OK(); -} - -// If a cache has been added into the ascendant tree over this PennTreebank node, then the cache will be executing -// a sampler for fetching the data. As such, any options in the PennTreebank node need to be reset to its defaults so -// that this PennTreebank node will produce the full set of data into the cache. 
-Status PennTreebankNode::MakeSimpleProducer() { - shard_id_ = 0; - num_shards_ = 1; - shuffle_ = ShuffleMode::kFalse; - num_samples_ = 0; - return Status::OK(); -} - -std::vector PennTreebankNode::WalkAllFiles(const std::string &usage, const std::string &dataset_dir) { - std::vector penn_treebank_files_list; - Path train_prefix("ptb.train.txt"); - Path test_prefix("ptb.test.txt"); - Path valid_prefix("ptb.valid.txt"); - Path dir(dataset_dir); - - if (usage == "train") { - Path temp_path = dir / train_prefix; - penn_treebank_files_list.push_back(temp_path.ToString()); - } else if (usage == "test") { - Path temp_path = dir / test_prefix; - penn_treebank_files_list.push_back(temp_path.ToString()); - } else if (usage == "valid") { - Path temp_path = dir / valid_prefix; - penn_treebank_files_list.push_back(temp_path.ToString()); - } else { - Path temp_path = dir / train_prefix; - penn_treebank_files_list.push_back(temp_path.ToString()); - Path temp_path1 = dir / test_prefix; - penn_treebank_files_list.push_back(temp_path1.ToString()); - Path temp_path2 = dir / valid_prefix; - penn_treebank_files_list.push_back(temp_path2.ToString()); - } - return penn_treebank_files_list; -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/penn_treebank_node.h" + +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/penn_treebank_op.h" +#include "mindspore-lite/minddata/dataset/util/status.h" + +namespace mindspore::lite { +namespace dataset { +// Constructor for PennTreebankNode. +PennTreebankNode::PennTreebankNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, + ShuffleMode shuffle, int32_t num_shards, int32_t shard_id, + const std::shared_ptr &cache) + : NonMappableSourceNode(std::move(cache)), + dataset_dir_(dataset_dir), + usage_(usage), + num_samples_(num_samples), + shuffle_(shuffle), + num_shards_(num_shards), + shard_id_(shard_id), + penn_treebank_files_list_(WalkAllFiles(usage, dataset_dir)) { + // Update the num_shards_ in global context. this number is only used for now by auto_num_worker_pass. User discretion + // is advised. Auto_num_worker_pass is currently an experimental feature which can still work if the num_shards_ isn't + // 100% correct. The reason behind is for now, PreBuildSampler doesn't offer a way to return num_shards. Once + // PreBuildSampler is phased out, this can be cleaned up. 
+  GlobalContext::config_manager()->set_num_shards_for_auto_num_workers(num_shards_);
+}
+
+std::shared_ptr<DatasetNode> PennTreebankNode::Copy() {
+  auto node =
+    std::make_shared<PennTreebankNode>(dataset_dir_, usage_, num_samples_, shuffle_, num_shards_, shard_id_, cache_);
+  (void)node->SetNumWorkers(num_workers_);
+  (void)node->SetConnectorQueueSize(connector_que_size_);
+  return node;
+}
+
+void PennTreebankNode::Print(std::ostream &out) const {
+  out << (Name() + "(cache: " + ((cache_ != nullptr) ? "true" : "false") +
+          ", num_shards: " + std::to_string(num_shards_) + ", shard_id: " + std::to_string(shard_id_) + ")");
+}
+
+Status PennTreebankNode::ValidateParams() {
+  RETURN_IF_NOT_OK(DatasetNode::ValidateParams());
+  RETURN_IF_NOT_OK(ValidateDatasetDirParam("PennTreebankNode", dataset_dir_));
+  RETURN_IF_NOT_OK(ValidateStringValue("PennTreebankNode", usage_, {"train", "test", "valid", "all"}));
+  RETURN_IF_NOT_OK(ValidateEnum("PennTreebankNode", "ShuffleMode", shuffle_,
+                                {ShuffleMode::kFalse, ShuffleMode::kFiles, ShuffleMode::kGlobal}));
+  if (num_samples_ < 0) {
+    std::string err_msg = "PennTreebankNode: Invalid number of samples: " + std::to_string(num_samples_);
+    LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg);
+  }
+  RETURN_IF_NOT_OK(ValidateDatasetShardParams("PennTreebankNode", num_shards_, shard_id_));
+  return Status::OK();
+}
+
+// Function to build PennTreebankNode.
+Status PennTreebankNode::Build(std::vector<std::shared_ptr<DatasetOp>> *const node_ops) {
+  bool shuffle_files = (shuffle_ == ShuffleMode::kGlobal || shuffle_ == ShuffleMode::kFiles);
+  // Sort the dataset files in a lexicographical order.
+  std::vector<std::string> sorted_dataset_files = penn_treebank_files_list_;
+  std::sort(sorted_dataset_files.begin(), sorted_dataset_files.end());
+  // Do internal Schema generation.
+  auto schema = std::make_unique<DataSchema>();
+  RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("text", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1)));
+  // Create and initialize PennTreebankNode.
+  std::shared_ptr<PennTreebankOp> penn_treebank_op =
+    std::make_shared<PennTreebankOp>(num_workers_, num_samples_, worker_connector_size_, std::move(schema),
+                                     sorted_dataset_files, connector_que_size_, shuffle_files, num_shards_, shard_id_);
+  RETURN_IF_NOT_OK(penn_treebank_op->Init());
+  // If a global shuffle is used for PennTreebank, it will inject a shuffle op over the PennTreebank.
+  // But, if there is a cache in the tree, we do not need the global shuffle and the shuffle op should not be built.
+  // This is achieved in the cache transform pass where we call MakeSimpleProducer to reset PennTreebank's shuffle
+  // option to false.
+  if (shuffle_ == ShuffleMode::kGlobal) {
+    // Inject ShuffleOp.
+    std::shared_ptr<ShuffleOp> shuffle_op = nullptr;
+    int64_t num_rows = 0;
+    // First, get the number of rows in the dataset.
+    RETURN_IF_NOT_OK(PennTreebankOp::CountAllFileRows(penn_treebank_files_list_, &num_rows));
+    // Add the shuffle op after this op.
+    RETURN_IF_NOT_OK(
+      AddShuffleOp(sorted_dataset_files.size(), num_shards_, num_rows, 0, connector_que_size_, &shuffle_op));
+    shuffle_op->SetTotalRepeats(GetTotalRepeats());
+    shuffle_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch());
+    shuffle_op->Skip(skip_steps_);
+    node_ops->push_back(shuffle_op);
+  }
+  penn_treebank_op->SetTotalRepeats(GetTotalRepeats());
+  penn_treebank_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch());
+  // Add PennTreebankNode.
+  node_ops->push_back(penn_treebank_op);
+  return Status::OK();
+}
+
+// Get the shard id of node.
+Status PennTreebankNode::GetShardId(int32_t *shard_id) {
+  *shard_id = shard_id_;
+  return Status::OK();
+}
+
+// Get Dataset size.
+Status PennTreebankNode::GetDatasetSize(const std::shared_ptr<DatasetSizeGetter> &size_getter, bool estimate,
+                                        int64_t *dataset_size) {
+  if (dataset_size_ > 0) {
+    *dataset_size = dataset_size_;
+    return Status::OK();
+  }
+  int64_t num_rows, sample_size = num_samples_;
+  RETURN_IF_NOT_OK(PennTreebankOp::CountAllFileRows(penn_treebank_files_list_, &num_rows));
+  num_rows = static_cast<int64_t>(ceil(num_rows / (1.0 * num_shards_)));
+  *dataset_size = sample_size > 0 ? std::min(num_rows, sample_size) : num_rows;
+  dataset_size_ = *dataset_size;
+  return Status::OK();
+}
+
+Status PennTreebankNode::to_json(nlohmann::json *out_json) {
+  nlohmann::json args;
+  args["num_parallel_workers"] = num_workers_;
+  args["connector_queue_size"] = connector_que_size_;
+  args["dataset_dir"] = dataset_dir_;
+  args["usage"] = usage_;
+  args["num_samples"] = num_samples_;
+  args["shuffle"] = shuffle_;
+  args["num_shards"] = num_shards_;
+  args["shard_id"] = shard_id_;
+  if (cache_ != nullptr) {
+    nlohmann::json cache_args;
+    RETURN_IF_NOT_OK(cache_->to_json(&cache_args));
+    args["cache"] = cache_args;
+  }
+  *out_json = args;
+  return Status::OK();
+}
+
+// Note: The following two functions are common among NonMappableSourceNode and should be promoted to its parent class.
+// PennTreebank by itself is a non-mappable dataset that does not support sampling.
+// However, if a cache operator is injected at some other place higher in the tree, that cache can
+// inherit this sampler from the leaf, providing sampling support from the caching layer.
+// That is why we setup the sampler for a leaf node that does not use sampling.
+Status PennTreebankNode::SetupSamplerForCache(std::shared_ptr<SamplerObj> *sampler) {
+  *sampler = SelectSampler(num_samples_, shuffle_, num_shards_, shard_id_);
+  return Status::OK();
+}
+
+// If a cache has been added into the ascendant tree over this PennTreebank node, then the cache will be executing
+// a sampler for fetching the data. As such, any options in the PennTreebank node need to be reset to its defaults so
+// that this PennTreebank node will produce the full set of data into the cache.
+Status PennTreebankNode::MakeSimpleProducer() { + shard_id_ = 0; + num_shards_ = 1; + shuffle_ = ShuffleMode::kFalse; + num_samples_ = 0; + return Status::OK(); +} + +std::vector PennTreebankNode::WalkAllFiles(const std::string &usage, const std::string &dataset_dir) { + std::vector penn_treebank_files_list; + Path train_prefix("ptb.train.txt"); + Path test_prefix("ptb.test.txt"); + Path valid_prefix("ptb.valid.txt"); + Path dir(dataset_dir); + + if (usage == "train") { + Path temp_path = dir / train_prefix; + penn_treebank_files_list.push_back(temp_path.ToString()); + } else if (usage == "test") { + Path temp_path = dir / test_prefix; + penn_treebank_files_list.push_back(temp_path.ToString()); + } else if (usage == "valid") { + Path temp_path = dir / valid_prefix; + penn_treebank_files_list.push_back(temp_path.ToString()); + } else { + Path temp_path = dir / train_prefix; + penn_treebank_files_list.push_back(temp_path.ToString()); + Path temp_path1 = dir / test_prefix; + penn_treebank_files_list.push_back(temp_path1.ToString()); + Path temp_path2 = dir / valid_prefix; + penn_treebank_files_list.push_back(temp_path2.ToString()); + } + return penn_treebank_files_list; +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/penn_treebank_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/penn_treebank_node.h index c37df769..140da173 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/penn_treebank_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/penn_treebank_node.h @@ -1,124 +1,124 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_PENN_TREEBANK_NODE_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_PENN_TREEBANK_NODE_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" - -namespace mindspore { -namespace dataset { -/// \brief class PennTreebankNode. -/// \brief Dataset derived class to represent PennTreebank dataset. -class PennTreebankNode : public NonMappableSourceNode { - public: - /// \brief Constructor. - PennTreebankNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, ShuffleMode shuffle, - int32_t num_shards, int32_t shard_id, const std::shared_ptr &cache); - - /// \brief Destructor. - ~PennTreebankNode() override = default; - - /// \brief Node name getter. - /// \return Name of the current node. - std::string Name() const override { return kPennTreebankNode; } - - /// \brief Print the description. - /// \param[in] out The output stream to write output to. - void Print(std::ostream &out) const override; - - /// \brief Copy the node to a new object. - /// \return A shared pointer to the new copy. 
- std::shared_ptr Copy() override; - - /// \brief A base class override function to create the required runtime dataset op objects for this class. - /// \param[in] node_ops A vector containing shared pointer to the Dataset Ops that this object will create. - /// \return Status Status::OK() if build successfully. - Status Build(std::vector> *const node_ops) override; - - /// \brief Parameters validation. - /// \return Status Status::OK() if all the parameters are valid. - Status ValidateParams() override; - - /// \brief Get the shard id of node. - /// \param[in] shard_id The shard id. - /// \return Status Status::OK() if get shard id successfully. - Status GetShardId(int32_t *shard_id) override; - - /// \brief Base-class override for GetDatasetSize. - /// \param[in] size_getter Shared pointer to DatasetSizeGetter. - /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting - /// dataset size at the expense of accuracy. - /// \param[out] dataset_size the size of the dataset. - /// \return Status of the function. - Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) override; - - /// \brief Getter functions. - const std::string &DatasetDir() const { return dataset_dir_; } - int32_t NumSamples() const { return num_samples_; } - int32_t NumShards() const { return num_shards_; } - int32_t ShardId() const { return shard_id_; } - ShuffleMode Shuffle() const { return shuffle_; } - const std::string &Usage() const { return usage_; } - - /// \brief Get the arguments of node - /// \param[out] out_json JSON string of all attributes - /// \return Status of the function - Status to_json(nlohmann::json *out_json) override; - - /// \brief PennTreebank by itself is a non-mappable dataset that does not support sampling. - /// However, if a cache operator is injected at some other place higher in - /// the tree, that cache can inherit this sampler from the leaf, providing - /// sampling support from the caching layer. That is why we setup the - /// sampler for a leaf node that does not use sampling. Note: This - /// function is common among NonMappableSourceNode and should be promoted - /// to its parent class. - /// \param[in] sampler The sampler to setup. - /// \return Status of the function. - Status SetupSamplerForCache(std::shared_ptr *sampler) override; - - /// \brief If a cache has been added into the ascendant tree over this PennTreebank node, - /// then the cache will be executing a sampler for fetching the data. - /// As such, any options in the PennTreebank node need to be reset to its defaults - /// so that this PennTreebank node will produce the full set of data into the cache. - /// Note: This function is common among NonMappableSourceNode and should be promoted to its - /// parent class. - /// \return Status of the function. - Status MakeSimpleProducer() override; - - /// \brief Generate a list of read file names according to usage. - /// \param[in] usage Part of dataset of PennTreebank. - /// \param[in] dataset_dir Path to the root directory that contains the dataset. - /// \return std::vector A list of read file names. 
- std::vector WalkAllFiles(const std::string &usage, const std::string &dataset_dir); - - private: - std::string dataset_dir_; - std::string usage_; - int64_t num_samples_; - int32_t num_shards_; - int32_t shard_id_; - ShuffleMode shuffle_; - std::vector penn_treebank_files_list_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_PENN_TREEBANK_NODE_H_ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_PENN_TREEBANK_NODE_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_PENN_TREEBANK_NODE_H_ + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" + +namespace mindspore::lite { +namespace dataset { +/// \brief class PennTreebankNode. +/// \brief Dataset derived class to represent PennTreebank dataset. +class PennTreebankNode : public NonMappableSourceNode { + public: + /// \brief Constructor. + PennTreebankNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, ShuffleMode shuffle, + int32_t num_shards, int32_t shard_id, const std::shared_ptr &cache); + + /// \brief Destructor. + ~PennTreebankNode() override = default; + + /// \brief Node name getter. + /// \return Name of the current node. + std::string Name() const override { return kPennTreebankNode; } + + /// \brief Print the description. + /// \param[in] out The output stream to write output to. + void Print(std::ostream &out) const override; + + /// \brief Copy the node to a new object. + /// \return A shared pointer to the new copy. + std::shared_ptr Copy() override; + + /// \brief A base class override function to create the required runtime dataset op objects for this class. + /// \param[in] node_ops A vector containing shared pointer to the Dataset Ops that this object will create. + /// \return Status Status::OK() if build successfully. + Status Build(std::vector> *const node_ops) override; + + /// \brief Parameters validation. + /// \return Status Status::OK() if all the parameters are valid. + Status ValidateParams() override; + + /// \brief Get the shard id of node. + /// \param[in] shard_id The shard id. + /// \return Status Status::OK() if get shard id successfully. + Status GetShardId(int32_t *shard_id) override; + + /// \brief Base-class override for GetDatasetSize. + /// \param[in] size_getter Shared pointer to DatasetSizeGetter. + /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting + /// dataset size at the expense of accuracy. + /// \param[out] dataset_size the size of the dataset. + /// \return Status of the function. + Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) override; + + /// \brief Getter functions. 
+ const std::string &DatasetDir() const { return dataset_dir_; } + int32_t NumSamples() const { return num_samples_; } + int32_t NumShards() const { return num_shards_; } + int32_t ShardId() const { return shard_id_; } + ShuffleMode Shuffle() const { return shuffle_; } + const std::string &Usage() const { return usage_; } + + /// \brief Get the arguments of node + /// \param[out] out_json JSON string of all attributes + /// \return Status of the function + Status to_json(nlohmann::json *out_json) override; + + /// \brief PennTreebank by itself is a non-mappable dataset that does not support sampling. + /// However, if a cache operator is injected at some other place higher in + /// the tree, that cache can inherit this sampler from the leaf, providing + /// sampling support from the caching layer. That is why we setup the + /// sampler for a leaf node that does not use sampling. Note: This + /// function is common among NonMappableSourceNode and should be promoted + /// to its parent class. + /// \param[in] sampler The sampler to setup. + /// \return Status of the function. + Status SetupSamplerForCache(std::shared_ptr *sampler) override; + + /// \brief If a cache has been added into the ascendant tree over this PennTreebank node, + /// then the cache will be executing a sampler for fetching the data. + /// As such, any options in the PennTreebank node need to be reset to its defaults + /// so that this PennTreebank node will produce the full set of data into the cache. + /// Note: This function is common among NonMappableSourceNode and should be promoted to its + /// parent class. + /// \return Status of the function. + Status MakeSimpleProducer() override; + + /// \brief Generate a list of read file names according to usage. + /// \param[in] usage Part of dataset of PennTreebank. + /// \param[in] dataset_dir Path to the root directory that contains the dataset. + /// \return std::vector A list of read file names. 
+ std::vector WalkAllFiles(const std::string &usage, const std::string &dataset_dir); + + private: + std::string dataset_dir_; + std::string usage_; + int64_t num_samples_; + int32_t num_shards_; + int32_t shard_id_; + ShuffleMode shuffle_; + std::vector penn_treebank_files_list_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_PENN_TREEBANK_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/photo_tour_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/photo_tour_node.cc index 809fc96b..65562829 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/photo_tour_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/photo_tour_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/photo_tour_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { PhotoTourNode::PhotoTourNode(const std::string &dataset_dir, const std::string &name, const std::string &usage, std::shared_ptr sampler, std::shared_ptr cache) @@ -128,4 +128,4 @@ Status PhotoTourNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/photo_tour_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/photo_tour_node.h index b69c9efb..685f2592 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/photo_tour_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/photo_tour_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class PhotoTourNode : public MappableSourceNode { public: @@ -94,5 +94,5 @@ class PhotoTourNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_PHOTO_TOUR_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/places365_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/places365_node.cc index 9700e8c4..a1fa3ba3 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/places365_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/places365_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/places365_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Places365Node::Places365Node(const std::string &dataset_dir, const std::string &usage, bool small, bool decode, std::shared_ptr sampler, std::shared_ptr cache) @@ -116,4 +116,4 @@ Status Places365Node::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/places365_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/places365_node.h index d34fc765..c8d964b8 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/places365_node.h +++ 
b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/places365_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class Places365Node : public MappableSourceNode { public: @@ -96,5 +96,5 @@ class Places365Node : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_PLACES365_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/qmnist_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/qmnist_node.cc index fb377561..b2f7eef1 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/qmnist_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/qmnist_node.cc @@ -1,154 +1,154 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/qmnist_node.h" - -#include -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/qmnist_op.h" -#ifndef ENABLE_ANDROID -#include "mindspore-lite/minddata/dataset/engine/serdes.h" -#endif -#include "mindspore-lite/minddata/dataset/util/status.h" - -namespace mindspore { -namespace dataset { -QMnistNode::QMnistNode(const std::string &dataset_dir, const std::string &usage, bool compat, - std::shared_ptr sampler, std::shared_ptr cache) - : MappableSourceNode(std::move(cache)), - dataset_dir_(dataset_dir), - usage_(usage), - compat_(compat), - sampler_(sampler) {} - -std::shared_ptr QMnistNode::Copy() { - std::shared_ptr sampler = (sampler_ == nullptr) ? nullptr : sampler_->SamplerCopy(); - auto node = std::make_shared(dataset_dir_, usage_, compat_, sampler, cache_); - (void)node->SetNumWorkers(num_workers_); - (void)node->SetConnectorQueueSize(connector_que_size_); - return node; -} - -void QMnistNode::Print(std::ostream &out) const { - out << (Name() + "(dataset dir: " + dataset_dir_ + ", usage: " + usage_ + - ", compat: " + (compat_ ? "true" : "false") + ", cache: " + ((cache_ != nullptr) ? "true" : "false") + ")"); -} - -Status QMnistNode::ValidateParams() { - RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); - RETURN_IF_NOT_OK(ValidateDatasetDirParam("QMnistDataset", dataset_dir_)); - RETURN_IF_NOT_OK(ValidateDatasetSampler("QMnistDataset", sampler_)); - RETURN_IF_NOT_OK( - ValidateStringValue("QMnistDataset", usage_, {"train", "test", "test10k", "test50k", "nist", "all"})); - return Status::OK(); -} - -Status QMnistNode::Build(std::vector> *const node_ops) { - // Do internal Schema generation. 
- auto schema = std::make_unique(); - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kCv, 1))); - if (compat_) { - TensorShape scalar = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK( - schema->AddColumn(ColDescriptor("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar))); - } else { - RETURN_IF_NOT_OK( - schema->AddColumn(ColDescriptor("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 1))); - } - - std::shared_ptr sampler_rt = nullptr; - RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); - - auto op = std::make_shared(dataset_dir_, usage_, compat_, std::move(schema), std::move(sampler_rt), - num_workers_, connector_que_size_); - op->SetTotalRepeats(GetTotalRepeats()); - op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - node_ops->push_back(op); - - return Status::OK(); -} - -// Get the shard id of node -Status QMnistNode::GetShardId(int32_t *shard_id) { - *shard_id = sampler_->ShardId(); - - return Status::OK(); -} - -// Get Dataset size -Status QMnistNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) { - if (dataset_size_ > 0) { - *dataset_size = dataset_size_; - return Status::OK(); - } - int64_t num_rows, sample_size; - RETURN_IF_NOT_OK(QMnistOp::CountTotalRows(dataset_dir_, usage_, &num_rows)); - std::shared_ptr sampler_rt = nullptr; - RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); - sample_size = sampler_rt->CalculateNumSamples(num_rows); - if (sample_size == -1) { - RETURN_IF_NOT_OK(size_getter->DryRun(shared_from_this(), &sample_size)); - } - *dataset_size = sample_size; - dataset_size_ = *dataset_size; - return Status::OK(); -} - -Status QMnistNode::to_json(nlohmann::json *out_json) { - nlohmann::json args, sampler_args; - RETURN_IF_NOT_OK(sampler_->to_json(&sampler_args)); - args["sampler"] = sampler_args; - args["num_parallel_workers"] = num_workers_; - args["connector_queue_size"] = connector_que_size_; - args["dataset_dir"] = dataset_dir_; - args["usage"] = usage_; - args["compat"] = compat_; - if (cache_ != nullptr) { - nlohmann::json cache_args; - RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); - args["cache"] = cache_args; - } - *out_json = args; - return Status::OK(); -} - -#ifndef ENABLE_ANDROID -Status QMnistNode::from_json(nlohmann::json json_obj, std::shared_ptr *ds) { - RETURN_IF_NOT_OK(ValidateParamInJson(json_obj, "num_parallel_workers", kQMnistNode)); - RETURN_IF_NOT_OK(ValidateParamInJson(json_obj, "connector_queue_size", kQMnistNode)); - RETURN_IF_NOT_OK(ValidateParamInJson(json_obj, "dataset_dir", kQMnistNode)); - RETURN_IF_NOT_OK(ValidateParamInJson(json_obj, "usage", kQMnistNode)); - RETURN_IF_NOT_OK(ValidateParamInJson(json_obj, "compat", kQMnistNode)); - RETURN_IF_NOT_OK(ValidateParamInJson(json_obj, "sampler", kQMnistNode)); - std::string dataset_dir = json_obj["dataset_dir"]; - std::string usage = json_obj["usage"]; - bool compat = json_obj["compat"]; - std::shared_ptr sampler; - RETURN_IF_NOT_OK(Serdes::ConstructSampler(json_obj["sampler"], &sampler)); - std::shared_ptr cache = nullptr; - RETURN_IF_NOT_OK(DatasetCache::from_json(json_obj, &cache)); - *ds = std::make_shared(dataset_dir, usage, compat, sampler, cache); - (void)(*ds)->SetNumWorkers(json_obj["num_parallel_workers"]); - (void)(*ds)->SetConnectorQueueSize(json_obj["connector_queue_size"]); - return Status::OK(); -} -#endif -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021-2022 Huawei Technologies Co., Ltd + * + * Licensed 
under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/qmnist_node.h" + +#include +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/qmnist_op.h" +#ifndef ENABLE_ANDROID +#include "mindspore-lite/minddata/dataset/engine/serdes.h" +#endif +#include "mindspore-lite/minddata/dataset/util/status.h" + +namespace mindspore::lite { +namespace dataset { +QMnistNode::QMnistNode(const std::string &dataset_dir, const std::string &usage, bool compat, + std::shared_ptr sampler, std::shared_ptr cache) + : MappableSourceNode(std::move(cache)), + dataset_dir_(dataset_dir), + usage_(usage), + compat_(compat), + sampler_(sampler) {} + +std::shared_ptr QMnistNode::Copy() { + std::shared_ptr sampler = (sampler_ == nullptr) ? nullptr : sampler_->SamplerCopy(); + auto node = std::make_shared(dataset_dir_, usage_, compat_, sampler, cache_); + (void)node->SetNumWorkers(num_workers_); + (void)node->SetConnectorQueueSize(connector_que_size_); + return node; +} + +void QMnistNode::Print(std::ostream &out) const { + out << (Name() + "(dataset dir: " + dataset_dir_ + ", usage: " + usage_ + + ", compat: " + (compat_ ? "true" : "false") + ", cache: " + ((cache_ != nullptr) ? "true" : "false") + ")"); +} + +Status QMnistNode::ValidateParams() { + RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); + RETURN_IF_NOT_OK(ValidateDatasetDirParam("QMnistDataset", dataset_dir_)); + RETURN_IF_NOT_OK(ValidateDatasetSampler("QMnistDataset", sampler_)); + RETURN_IF_NOT_OK( + ValidateStringValue("QMnistDataset", usage_, {"train", "test", "test10k", "test50k", "nist", "all"})); + return Status::OK(); +} + +Status QMnistNode::Build(std::vector> *const node_ops) { + // Do internal Schema generation. 
+ auto schema = std::make_unique(); + RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kCv, 1))); + if (compat_) { + TensorShape scalar = TensorShape::CreateScalar(); + RETURN_IF_NOT_OK( + schema->AddColumn(ColDescriptor("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar))); + } else { + RETURN_IF_NOT_OK( + schema->AddColumn(ColDescriptor("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 1))); + } + + std::shared_ptr sampler_rt = nullptr; + RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); + + auto op = std::make_shared(dataset_dir_, usage_, compat_, std::move(schema), std::move(sampler_rt), + num_workers_, connector_que_size_); + op->SetTotalRepeats(GetTotalRepeats()); + op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); + node_ops->push_back(op); + + return Status::OK(); +} + +// Get the shard id of node +Status QMnistNode::GetShardId(int32_t *shard_id) { + *shard_id = sampler_->ShardId(); + + return Status::OK(); +} + +// Get Dataset size +Status QMnistNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) { + if (dataset_size_ > 0) { + *dataset_size = dataset_size_; + return Status::OK(); + } + int64_t num_rows, sample_size; + RETURN_IF_NOT_OK(QMnistOp::CountTotalRows(dataset_dir_, usage_, &num_rows)); + std::shared_ptr sampler_rt = nullptr; + RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); + sample_size = sampler_rt->CalculateNumSamples(num_rows); + if (sample_size == -1) { + RETURN_IF_NOT_OK(size_getter->DryRun(shared_from_this(), &sample_size)); + } + *dataset_size = sample_size; + dataset_size_ = *dataset_size; + return Status::OK(); +} + +Status QMnistNode::to_json(nlohmann::json *out_json) { + nlohmann::json args, sampler_args; + RETURN_IF_NOT_OK(sampler_->to_json(&sampler_args)); + args["sampler"] = sampler_args; + args["num_parallel_workers"] = num_workers_; + args["connector_queue_size"] = connector_que_size_; + args["dataset_dir"] = dataset_dir_; + args["usage"] = usage_; + args["compat"] = compat_; + if (cache_ != nullptr) { + nlohmann::json cache_args; + RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); + args["cache"] = cache_args; + } + *out_json = args; + return Status::OK(); +} + +#ifndef ENABLE_ANDROID +Status QMnistNode::from_json(nlohmann::json json_obj, std::shared_ptr *ds) { + RETURN_IF_NOT_OK(ValidateParamInJson(json_obj, "num_parallel_workers", kQMnistNode)); + RETURN_IF_NOT_OK(ValidateParamInJson(json_obj, "connector_queue_size", kQMnistNode)); + RETURN_IF_NOT_OK(ValidateParamInJson(json_obj, "dataset_dir", kQMnistNode)); + RETURN_IF_NOT_OK(ValidateParamInJson(json_obj, "usage", kQMnistNode)); + RETURN_IF_NOT_OK(ValidateParamInJson(json_obj, "compat", kQMnistNode)); + RETURN_IF_NOT_OK(ValidateParamInJson(json_obj, "sampler", kQMnistNode)); + std::string dataset_dir = json_obj["dataset_dir"]; + std::string usage = json_obj["usage"]; + bool compat = json_obj["compat"]; + std::shared_ptr sampler; + RETURN_IF_NOT_OK(Serdes::ConstructSampler(json_obj["sampler"], &sampler)); + std::shared_ptr cache = nullptr; + RETURN_IF_NOT_OK(DatasetCache::from_json(json_obj, &cache)); + *ds = std::make_shared(dataset_dir, usage, compat, sampler, cache); + (void)(*ds)->SetNumWorkers(json_obj["num_parallel_workers"]); + (void)(*ds)->SetConnectorQueueSize(json_obj["connector_queue_size"]); + return Status::OK(); +} +#endif +} // namespace dataset +} // namespace mindspore::lite diff --git 
a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/qmnist_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/qmnist_node.h index f291b17e..ad56640e 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/qmnist_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/qmnist_node.h @@ -1,109 +1,109 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_QMNIST_NODE_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_QMNIST_NODE_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" - -namespace mindspore { -namespace dataset { -class QMnistNode : public MappableSourceNode { - public: - /// \brief Constructor. - QMnistNode(const std::string &dataset_dir, const std::string &usage, bool compat, std::shared_ptr sampler, - std::shared_ptr cache); - - /// \brief Destructor. - ~QMnistNode() override = default; - - /// \brief Node name getter. - /// \return Name of the current node. - std::string Name() const override { return kQMnistNode; } - - /// \brief Print the description. - /// \param out - The output stream to write output to. - void Print(std::ostream &out) const override; - - /// \brief Copy the node to a new object. - /// \return A shared pointer to the new copy. - std::shared_ptr Copy() override; - - /// \brief a base class override function to create the required runtime dataset op objects for this class. - /// \param node_ops - A vector containing shared pointer to the Dataset Ops that this object will create. - /// \return Status Status::OK() if build successfully. - Status Build(std::vector> *const node_ops) override; - - /// \brief Parameters validation. - /// \return Status Status::OK() if all the parameters are valid. - Status ValidateParams() override; - - /// \brief Get the shard id of node. - /// \param[in] shard_id The shard id. - /// \return Status Status::OK() if get shard id successfully. - Status GetShardId(int32_t *shard_id) override; - - /// \brief Base-class override for GetDatasetSize. - /// \param[in] size_getter Shared pointer to DatasetSizeGetter. - /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting - /// dataset size at the expense of accuracy. - /// \param[out] dataset_size the size of the dataset. - /// \return Status of the function. - Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) override; - - /// \brief Getter functions. - const std::string &DatasetDir() const { return dataset_dir_; } - - /// \brief Getter functions. - const std::string &Usage() const { return usage_; } - - /// \brief Getter functions. - const bool Compat() const { return compat_; } - - /// \brief Get the arguments of node. - /// \param[out] out_json JSON string of all attributes. 
- /// \return Status of the function. - Status to_json(nlohmann::json *out_json) override; - -#ifndef ENABLE_ANDROID - /// \brief Function to read dataset in json - /// \param[in] json_obj The JSON object to be deserialized - /// \param[out] ds Deserialized dataset - /// \return Status The status code returned - static Status from_json(nlohmann::json json_obj, std::shared_ptr *ds); -#endif - - /// \brief Sampler getter. - /// \return SamplerObj of the current node. - std::shared_ptr Sampler() override { return sampler_; } - - /// \brief Sampler setter. - void SetSampler(std::shared_ptr sampler) override { sampler_ = sampler; } - - private: - std::string dataset_dir_; - std::string usage_; - bool compat_; - std::shared_ptr sampler_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_QMNIST_NODE_H_ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_QMNIST_NODE_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_QMNIST_NODE_H_ + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" + +namespace mindspore::lite { +namespace dataset { +class QMnistNode : public MappableSourceNode { + public: + /// \brief Constructor. + QMnistNode(const std::string &dataset_dir, const std::string &usage, bool compat, std::shared_ptr sampler, + std::shared_ptr cache); + + /// \brief Destructor. + ~QMnistNode() override = default; + + /// \brief Node name getter. + /// \return Name of the current node. + std::string Name() const override { return kQMnistNode; } + + /// \brief Print the description. + /// \param out - The output stream to write output to. + void Print(std::ostream &out) const override; + + /// \brief Copy the node to a new object. + /// \return A shared pointer to the new copy. + std::shared_ptr Copy() override; + + /// \brief a base class override function to create the required runtime dataset op objects for this class. + /// \param node_ops - A vector containing shared pointer to the Dataset Ops that this object will create. + /// \return Status Status::OK() if build successfully. + Status Build(std::vector> *const node_ops) override; + + /// \brief Parameters validation. + /// \return Status Status::OK() if all the parameters are valid. + Status ValidateParams() override; + + /// \brief Get the shard id of node. + /// \param[in] shard_id The shard id. + /// \return Status Status::OK() if get shard id successfully. + Status GetShardId(int32_t *shard_id) override; + + /// \brief Base-class override for GetDatasetSize. + /// \param[in] size_getter Shared pointer to DatasetSizeGetter. + /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting + /// dataset size at the expense of accuracy. + /// \param[out] dataset_size the size of the dataset. 
+ /// \return Status of the function. + Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) override; + + /// \brief Getter functions. + const std::string &DatasetDir() const { return dataset_dir_; } + + /// \brief Getter functions. + const std::string &Usage() const { return usage_; } + + /// \brief Getter functions. + const bool Compat() const { return compat_; } + + /// \brief Get the arguments of node. + /// \param[out] out_json JSON string of all attributes. + /// \return Status of the function. + Status to_json(nlohmann::json *out_json) override; + +#ifndef ENABLE_ANDROID + /// \brief Function to read dataset in json + /// \param[in] json_obj The JSON object to be deserialized + /// \param[out] ds Deserialized dataset + /// \return Status The status code returned + static Status from_json(nlohmann::json json_obj, std::shared_ptr *ds); +#endif + + /// \brief Sampler getter. + /// \return SamplerObj of the current node. + std::shared_ptr Sampler() override { return sampler_; } + + /// \brief Sampler setter. + void SetSampler(std::shared_ptr sampler) override { sampler_ = sampler; } + + private: + std::string dataset_dir_; + std::string usage_; + bool compat_; + std::shared_ptr sampler_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_QMNIST_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/random_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/random_node.cc index 94120588..e9d117a9 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/random_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/random_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { std::shared_ptr RandomNode::Copy() { std::shared_ptr node; @@ -148,4 +148,4 @@ Status RandomNode::AcceptAfter(IRNodePass *const p, bool *const modified) { return p->VisitAfter(shared_from_base(), modified); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/random_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/random_node.h index 7ccdec94..c2e348e9 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/random_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/random_node.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" #include "mindspore-lite/minddata/dataset/include/dataset/samplers.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomNode : public NonMappableSourceNode { public: @@ -137,5 +137,5 @@ class RandomNode : public NonMappableSourceNode { std::unique_ptr data_schema_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_RANDOM_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/rendered_sst2_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/rendered_sst2_node.cc index 8fb04d3e..801efc61 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/rendered_sst2_node.cc +++ 
b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/rendered_sst2_node.cc @@ -28,7 +28,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const std::set kExts = {".png"}; @@ -146,4 +146,4 @@ Status RenderedSST2Node::from_json(nlohmann::json json_obj, std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_RENDERED_SST2_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/distributed_sampler_ir.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/distributed_sampler_ir.cc index 9db6e5a5..a7fed0fe 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/distributed_sampler_ir.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/distributed_sampler_ir.cc @@ -28,7 +28,7 @@ #include "minddata/mindrecord/include/shard_shuffle.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor DistributedSamplerObj::DistributedSamplerObj(int64_t num_shards, int64_t shard_id, dataset::ShuffleMode shuffle_mode, @@ -154,4 +154,4 @@ std::shared_ptr DistributedSamplerObj::SamplerCopy() { int64_t DistributedSamplerObj::ShardId() { return shard_id_; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/distributed_sampler_ir.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/distributed_sampler_ir.h index 4242acc8..0e858999 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/distributed_sampler_ir.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/distributed_sampler_ir.h @@ -27,7 +27,7 @@ #include "minddata/mindrecord/include/shard_operator.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Internal Sampler class forward declaration class SamplerRT; @@ -77,5 +77,5 @@ class DistributedSamplerObj : public SamplerObj { bool even_dist_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SAMPLERS_DISTRIBUTED_SAMPLER_IR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/pk_sampler_ir.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/pk_sampler_ir.cc index 35b4f518..e1835120 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/pk_sampler_ir.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/pk_sampler_ir.cc @@ -30,7 +30,7 @@ #include "minddata/mindrecord/include/shard_shuffle.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor PKSamplerObj::PKSamplerObj(int64_t num_val, bool shuffle, int64_t num_samples) @@ -109,4 +109,4 @@ std::shared_ptr PKSamplerObj::SamplerCopy() { return sampler; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/pk_sampler_ir.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/pk_sampler_ir.h index 9f2ac5f7..8a73f901 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/pk_sampler_ir.h +++ 
b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/pk_sampler_ir.h @@ -27,7 +27,7 @@ #include "minddata/mindrecord/include/shard_operator.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Internal Sampler class forward declaration class SamplerRT; @@ -68,5 +68,5 @@ class PKSamplerObj : public SamplerObj { int64_t num_samples_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SAMPLERS_PK_SAMPLER_IR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/prebuilt_sampler_ir.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/prebuilt_sampler_ir.cc index cfe8b66b..51f1c0d9 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/prebuilt_sampler_ir.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/prebuilt_sampler_ir.cc @@ -30,7 +30,7 @@ #include "minddata/mindrecord/include/shard_shuffle.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor PreBuiltSamplerObj::PreBuiltSamplerObj(std::shared_ptr sampler) : sp_(std::move(sampler)) {} @@ -87,4 +87,4 @@ Status PreBuiltSamplerObj::to_json(nlohmann::json *const out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/prebuilt_sampler_ir.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/prebuilt_sampler_ir.h index 6d1ddce7..4ede3d87 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/prebuilt_sampler_ir.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/prebuilt_sampler_ir.h @@ -26,7 +26,7 @@ #include "minddata/mindrecord/include/shard_operator.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Internal Sampler class forward declaration class SamplerRT; @@ -60,5 +60,5 @@ class PreBuiltSamplerObj : public SamplerObj { #endif }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SAMPLERS_PREBUILT_SAMPLER_IR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/random_sampler_ir.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/random_sampler_ir.cc index 84905d19..fb09dd3d 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/random_sampler_ir.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/random_sampler_ir.cc @@ -28,7 +28,7 @@ #include "minddata/mindrecord/include/shard_shuffle.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor RandomSamplerObj::RandomSamplerObj(bool replacement, int64_t num_samples, bool reshuffle_each_epoch, @@ -103,4 +103,4 @@ std::shared_ptr RandomSamplerObj::SamplerCopy() { return sampler; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/random_sampler_ir.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/random_sampler_ir.h index b7b9b65f..e8a0aeac 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/random_sampler_ir.h +++ 
b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/random_sampler_ir.h @@ -27,7 +27,7 @@ #include "minddata/mindrecord/include/shard_operator.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Internal Sampler class forward declaration class SamplerRT; @@ -70,5 +70,5 @@ class RandomSamplerObj : public SamplerObj { dataset::ShuffleMode shuffle_mode_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SAMPLERS_RANDOM_SAMPLER_IR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/samplers_ir.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/samplers_ir.cc index 18b34e03..3ecac7d9 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/samplers_ir.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/samplers_ir.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/core/config_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor @@ -87,4 +87,4 @@ Status SamplerObj::from_json(nlohmann::json json_obj, std::shared_ptr> children_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SAMPLERS_SAMPLERS_IR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/sequential_sampler_ir.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/sequential_sampler_ir.cc index fd5a0311..620e0a78 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/sequential_sampler_ir.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/sequential_sampler_ir.cc @@ -28,7 +28,7 @@ #include "minddata/mindrecord/include/shard_shuffle.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor SequentialSamplerObj::SequentialSamplerObj(int64_t start_index, int64_t num_samples) @@ -101,4 +101,4 @@ std::shared_ptr SequentialSamplerObj::SamplerCopy() { return sampler; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/sequential_sampler_ir.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/sequential_sampler_ir.h index 00928f04..68b9f476 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/sequential_sampler_ir.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/sequential_sampler_ir.h @@ -27,7 +27,7 @@ #include "minddata/mindrecord/include/shard_operator.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Internal Sampler class forward declaration class SamplerRT; @@ -67,5 +67,5 @@ class SequentialSamplerObj : public SamplerObj { int64_t num_samples_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SAMPLERS_SEQUENTIAL_SAMPLER_IR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/skip_first_epoch_sampler_ir.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/skip_first_epoch_sampler_ir.cc index af3535f7..3dcbb956 100644 --- 
a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/skip_first_epoch_sampler_ir.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/skip_first_epoch_sampler_ir.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sampler/skip_first_epoch_sampler.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor SkipFirstEpochSamplerObj::SkipFirstEpochSamplerObj(int64_t start_index) : SequentialSamplerObj(start_index, 0) {} @@ -65,4 +65,4 @@ std::shared_ptr SkipFirstEpochSamplerObj::SamplerCopy() { return sampler; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/skip_first_epoch_sampler_ir.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/skip_first_epoch_sampler_ir.h index ddc4949e..19d48403 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/skip_first_epoch_sampler_ir.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/skip_first_epoch_sampler_ir.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/sequential_sampler_ir.h" #include "include/api/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Internal Sampler class forward declaration class SamplerRT; @@ -52,5 +52,5 @@ class SkipFirstEpochSamplerObj : public SequentialSamplerObj { #endif }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SAMPLERS_SKIP_FIRST_EPOCH_SAMPLER_IR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_random_sampler_ir.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_random_sampler_ir.cc index 88b4e1c1..062caa9c 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_random_sampler_ir.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_random_sampler_ir.cc @@ -30,7 +30,7 @@ #include "minddata/mindrecord/include/shard_shuffle.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor SubsetRandomSamplerObj::SubsetRandomSamplerObj(std::vector indices, int64_t num_samples) @@ -89,4 +89,4 @@ std::shared_ptr SubsetRandomSamplerObj::SamplerCopy() { return sampler; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_random_sampler_ir.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_random_sampler_ir.h index 87605dc9..f3fe3456 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_random_sampler_ir.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_random_sampler_ir.h @@ -29,7 +29,7 @@ #include "minddata/mindrecord/include/shard_operator.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Internal Sampler class forward declaration class SamplerRT; @@ -57,5 +57,5 @@ class SubsetRandomSamplerObj : public SubsetSamplerObj { private: }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // 
MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SAMPLERS_SUBSET_RANDOM_SAMPLER_IR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_sampler_ir.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_sampler_ir.cc index c35a9383..ae5d4077 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_sampler_ir.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_sampler_ir.cc @@ -30,7 +30,7 @@ #include "minddata/mindrecord/include/shard_shuffle.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor SubsetSamplerObj::SubsetSamplerObj(std::vector indices, int64_t num_samples) @@ -96,4 +96,4 @@ std::shared_ptr SubsetSamplerObj::SamplerCopy() { return sampler; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_sampler_ir.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_sampler_ir.h index b3dfd18e..e91ecc87 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_sampler_ir.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/subset_sampler_ir.h @@ -28,7 +28,7 @@ #include "minddata/mindrecord/include/shard_operator.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Internal Sampler class forward declaration class SamplerRT; @@ -68,5 +68,5 @@ class SubsetSamplerObj : public SamplerObj { int64_t num_samples_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SAMPLERS_SUBSET_SAMPLER_IR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/weighted_random_sampler_ir.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/weighted_random_sampler_ir.cc index 8e9fe9b4..2d18589a 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/weighted_random_sampler_ir.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/weighted_random_sampler_ir.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/core/config_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor WeightedRandomSamplerObj::WeightedRandomSamplerObj(std::vector weights, int64_t num_samples, bool replacement) @@ -99,4 +99,4 @@ std::shared_ptr WeightedRandomSamplerObj::SamplerCopy() { return sampler; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/weighted_random_sampler_ir.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/weighted_random_sampler_ir.h index 837e069a..270ef596 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/weighted_random_sampler_ir.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/weighted_random_sampler_ir.h @@ -28,7 +28,7 @@ #include "minddata/mindrecord/include/shard_operator.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Internal Sampler class forward declaration class SamplerRT; @@ -65,5 +65,5 @@ class WeightedRandomSamplerObj : public SamplerObj { bool replacement_; }; } // namespace dataset -} // namespace mindspore +} // 
namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SAMPLERS_WEIGHTED_RANDOM_SAMPLER_IR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sbu_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sbu_node.cc index 48e53c88..f1634d87 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sbu_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sbu_node.cc @@ -1,125 +1,125 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sbu_node.h" - -#include -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sbu_op.h" -#include "mindspore-lite/minddata/dataset/util/status.h" - -namespace mindspore { -namespace dataset { -SBUNode::SBUNode(const std::string &dataset_dir, bool decode, const std::shared_ptr &sampler, - const std::shared_ptr &cache) - : MappableSourceNode(std::move(cache)), dataset_dir_(dataset_dir), decode_(decode), sampler_(sampler) {} - -std::shared_ptr SBUNode::Copy() { - std::shared_ptr sampler = (sampler_ == nullptr) ? nullptr : sampler_->SamplerCopy(); - auto node = std::make_shared(dataset_dir_, decode_, sampler, cache_); - (void)node->SetNumWorkers(num_workers_); - (void)node->SetConnectorQueueSize(connector_que_size_); - return node; -} - -void SBUNode::Print(std::ostream &out) const { - out << (Name() + "(dataset dir: " + dataset_dir_ + ", decode: " + (decode_ ? "true" : "false") + - ", cache: " + ((cache_ != nullptr) ? "true" : "false") + ")"); -} - -Status SBUNode::ValidateParams() { - RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); - RETURN_IF_NOT_OK(ValidateDatasetDirParam("SBUDataset", dataset_dir_)); - RETURN_IF_NOT_OK(ValidateDatasetSampler("SBUDataset", sampler_)); - - Path root_dir(dataset_dir_); - - Path url_path = root_dir / Path("SBU_captioned_photo_dataset_urls.txt"); - Path caption_path = root_dir / Path("SBU_captioned_photo_dataset_captions.txt"); - Path image_path = root_dir / Path("sbu_images"); - - RETURN_IF_NOT_OK(ValidateDatasetFilesParam("SBUDataset", {url_path.ToString()}, "url file")); - RETURN_IF_NOT_OK(ValidateDatasetFilesParam("SBUDataset", {caption_path.ToString()}, "caption file")); - RETURN_IF_NOT_OK(ValidateDatasetDirParam("SBUDataset", {image_path.ToString()})); - - return Status::OK(); -} - -Status SBUNode::Build(std::vector> *const node_ops) { - // Do internal Schema generation. 
- auto schema = std::make_unique(); - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1))); - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("caption", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1))); - std::shared_ptr sampler_rt = nullptr; - RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); - - auto op = std::make_shared(dataset_dir_, decode_, std::move(schema), std::move(sampler_rt), num_workers_, - connector_que_size_); - op->SetTotalRepeats(GetTotalRepeats()); - op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - node_ops->push_back(op); - - return Status::OK(); -} - -// Get the shard id of node -Status SBUNode::GetShardId(int32_t *shard_id) { - *shard_id = sampler_->ShardId(); - - return Status::OK(); -} - -// Get Dataset size -Status SBUNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) { - if (dataset_size_ > 0) { - *dataset_size = dataset_size_; - return Status::OK(); - } - int64_t num_rows, sample_size; - RETURN_IF_NOT_OK(SBUOp::CountTotalRows(dataset_dir_, &num_rows)); - std::shared_ptr sampler_rt = nullptr; - RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); - sample_size = sampler_rt->CalculateNumSamples(num_rows); - if (sample_size == -1) { - RETURN_IF_NOT_OK(size_getter->DryRun(shared_from_this(), &sample_size)); - } - *dataset_size = sample_size; - dataset_size_ = *dataset_size; - return Status::OK(); -} - -Status SBUNode::to_json(nlohmann::json *out_json) { - nlohmann::json args, sampler_args; - RETURN_IF_NOT_OK(sampler_->to_json(&sampler_args)); - args["sampler"] = sampler_args; - args["num_parallel_workers"] = num_workers_; - args["connector_queue_size"] = connector_que_size_; - args["dataset_dir"] = dataset_dir_; - args["decode"] = decode_; - if (cache_ != nullptr) { - nlohmann::json cache_args; - RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); - args["cache"] = cache_args; - } - *out_json = args; - return Status::OK(); -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021-2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sbu_node.h" + +#include +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sbu_op.h" +#include "mindspore-lite/minddata/dataset/util/status.h" + +namespace mindspore::lite { +namespace dataset { +SBUNode::SBUNode(const std::string &dataset_dir, bool decode, const std::shared_ptr &sampler, + const std::shared_ptr &cache) + : MappableSourceNode(std::move(cache)), dataset_dir_(dataset_dir), decode_(decode), sampler_(sampler) {} + +std::shared_ptr SBUNode::Copy() { + std::shared_ptr sampler = (sampler_ == nullptr) ? 
nullptr : sampler_->SamplerCopy(); + auto node = std::make_shared(dataset_dir_, decode_, sampler, cache_); + (void)node->SetNumWorkers(num_workers_); + (void)node->SetConnectorQueueSize(connector_que_size_); + return node; +} + +void SBUNode::Print(std::ostream &out) const { + out << (Name() + "(dataset dir: " + dataset_dir_ + ", decode: " + (decode_ ? "true" : "false") + + ", cache: " + ((cache_ != nullptr) ? "true" : "false") + ")"); +} + +Status SBUNode::ValidateParams() { + RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); + RETURN_IF_NOT_OK(ValidateDatasetDirParam("SBUDataset", dataset_dir_)); + RETURN_IF_NOT_OK(ValidateDatasetSampler("SBUDataset", sampler_)); + + Path root_dir(dataset_dir_); + + Path url_path = root_dir / Path("SBU_captioned_photo_dataset_urls.txt"); + Path caption_path = root_dir / Path("SBU_captioned_photo_dataset_captions.txt"); + Path image_path = root_dir / Path("sbu_images"); + + RETURN_IF_NOT_OK(ValidateDatasetFilesParam("SBUDataset", {url_path.ToString()}, "url file")); + RETURN_IF_NOT_OK(ValidateDatasetFilesParam("SBUDataset", {caption_path.ToString()}, "caption file")); + RETURN_IF_NOT_OK(ValidateDatasetDirParam("SBUDataset", {image_path.ToString()})); + + return Status::OK(); +} + +Status SBUNode::Build(std::vector> *const node_ops) { + // Do internal Schema generation. + auto schema = std::make_unique(); + RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1))); + RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("caption", DataType(DataType::DE_UINT8), TensorImpl::kFlexible, 1))); + std::shared_ptr sampler_rt = nullptr; + RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); + + auto op = std::make_shared(dataset_dir_, decode_, std::move(schema), std::move(sampler_rt), num_workers_, + connector_que_size_); + op->SetTotalRepeats(GetTotalRepeats()); + op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); + node_ops->push_back(op); + + return Status::OK(); +} + +// Get the shard id of node +Status SBUNode::GetShardId(int32_t *shard_id) { + *shard_id = sampler_->ShardId(); + + return Status::OK(); +} + +// Get Dataset size +Status SBUNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) { + if (dataset_size_ > 0) { + *dataset_size = dataset_size_; + return Status::OK(); + } + int64_t num_rows, sample_size; + RETURN_IF_NOT_OK(SBUOp::CountTotalRows(dataset_dir_, &num_rows)); + std::shared_ptr sampler_rt = nullptr; + RETURN_IF_NOT_OK(sampler_->SamplerBuild(&sampler_rt)); + sample_size = sampler_rt->CalculateNumSamples(num_rows); + if (sample_size == -1) { + RETURN_IF_NOT_OK(size_getter->DryRun(shared_from_this(), &sample_size)); + } + *dataset_size = sample_size; + dataset_size_ = *dataset_size; + return Status::OK(); +} + +Status SBUNode::to_json(nlohmann::json *out_json) { + nlohmann::json args, sampler_args; + RETURN_IF_NOT_OK(sampler_->to_json(&sampler_args)); + args["sampler"] = sampler_args; + args["num_parallel_workers"] = num_workers_; + args["connector_queue_size"] = connector_que_size_; + args["dataset_dir"] = dataset_dir_; + args["decode"] = decode_; + if (cache_ != nullptr) { + nlohmann::json cache_args; + RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); + args["cache"] = cache_args; + } + *out_json = args; + return Status::OK(); +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sbu_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sbu_node.h index 
68e9fcdd..0d4d5e1d 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sbu_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sbu_node.h @@ -1,95 +1,95 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SBU_NODE_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SBU_NODE_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" - -namespace mindspore { -namespace dataset { -class SBUNode : public MappableSourceNode { - public: - /// \brief Constructor. - SBUNode(const std::string &dataset_dir, bool decode, const std::shared_ptr &sampler, - const std::shared_ptr &cache); - - /// \brief Destructor. - ~SBUNode() override = default; - - /// \brief Node name getter. - /// \return Name of the current node. - std::string Name() const override { return kSBUNode; } - - /// \brief Print the description. - /// \param out - The output stream to write output to. - void Print(std::ostream &out) const override; - - /// \brief Copy the node to a new object. - /// \return A shared pointer to the new copy. - std::shared_ptr Copy() override; - - /// \brief a base class override function to create the required runtime dataset op objects for this class. - /// \param node_ops - A vector containing shared pointer to the Dataset Ops that this object will create. - /// \return Status Status::OK() if build successfully. - Status Build(std::vector> *const node_ops) override; - - /// \brief Parameters validation. - /// \return Status Status::OK() if all the parameters are valid. - Status ValidateParams() override; - - /// \brief Get the shard id of node. - /// \param[in] shard_id The shard id. - /// \return Status Status::OK() if get shard id successfully. - Status GetShardId(int32_t *shard_id) override; - - /// \brief Base-class override for GetDatasetSize. - /// \param[in] size_getter Shared pointer to DatasetSizeGetter. - /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting - /// dataset size at the expense of accuracy. - /// \param[out] dataset_size the size of the dataset. - /// \return Status of the function. - Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) override; - - /// \brief Getter functions. - const std::string &DatasetDir() const { return dataset_dir_; } - bool Decode() const { return decode_; } - - /// \brief Get the arguments of node. - /// \param[out] out_json JSON string of all attributes. - /// \return Status of the function. - Status to_json(nlohmann::json *out_json) override; - - /// \brief Sampler getter. - /// \return SamplerObj of the current node. - std::shared_ptr Sampler() override { return sampler_; } - - /// \brief Sampler setter. 
- void SetSampler(std::shared_ptr sampler) override { sampler_ = sampler; } - - private: - std::string dataset_dir_; - bool decode_; - std::shared_ptr sampler_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SBU_NODE_H_ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SBU_NODE_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SBU_NODE_H_ + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" + +namespace mindspore::lite { +namespace dataset { +class SBUNode : public MappableSourceNode { + public: + /// \brief Constructor. + SBUNode(const std::string &dataset_dir, bool decode, const std::shared_ptr &sampler, + const std::shared_ptr &cache); + + /// \brief Destructor. + ~SBUNode() override = default; + + /// \brief Node name getter. + /// \return Name of the current node. + std::string Name() const override { return kSBUNode; } + + /// \brief Print the description. + /// \param out - The output stream to write output to. + void Print(std::ostream &out) const override; + + /// \brief Copy the node to a new object. + /// \return A shared pointer to the new copy. + std::shared_ptr Copy() override; + + /// \brief a base class override function to create the required runtime dataset op objects for this class. + /// \param node_ops - A vector containing shared pointer to the Dataset Ops that this object will create. + /// \return Status Status::OK() if build successfully. + Status Build(std::vector> *const node_ops) override; + + /// \brief Parameters validation. + /// \return Status Status::OK() if all the parameters are valid. + Status ValidateParams() override; + + /// \brief Get the shard id of node. + /// \param[in] shard_id The shard id. + /// \return Status Status::OK() if get shard id successfully. + Status GetShardId(int32_t *shard_id) override; + + /// \brief Base-class override for GetDatasetSize. + /// \param[in] size_getter Shared pointer to DatasetSizeGetter. + /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting + /// dataset size at the expense of accuracy. + /// \param[out] dataset_size the size of the dataset. + /// \return Status of the function. + Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) override; + + /// \brief Getter functions. + const std::string &DatasetDir() const { return dataset_dir_; } + bool Decode() const { return decode_; } + + /// \brief Get the arguments of node. + /// \param[out] out_json JSON string of all attributes. + /// \return Status of the function. + Status to_json(nlohmann::json *out_json) override; + + /// \brief Sampler getter. + /// \return SamplerObj of the current node. 
+ std::shared_ptr Sampler() override { return sampler_; } + + /// \brief Sampler setter. + void SetSampler(std::shared_ptr sampler) override { sampler_ = sampler; } + + private: + std::string dataset_dir_; + bool decode_; + std::shared_ptr sampler_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SBU_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/semeion_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/semeion_node.cc index d5c1ffd5..cb623e82 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/semeion_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/semeion_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/semeion_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for SemeionNode. SemeionNode::SemeionNode(const std::string &dataset_dir, const std::shared_ptr &sampler, @@ -110,4 +110,4 @@ Status SemeionNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/semeion_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/semeion_node.h index 1a7de56f..5a4598ae 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/semeion_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/semeion_node.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SemeionNode : public MappableSourceNode { public: @@ -90,5 +90,5 @@ class SemeionNode : public MappableSourceNode { std::shared_ptr sampler_; }; // class SemeionNode } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SEMEION_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sogou_news_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sogou_news_node.cc index 5f7eb7ba..7b2b160c 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sogou_news_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sogou_news_node.cc @@ -1,196 +1,196 @@ -/** - * Copyright 2021-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sogou_news_node.h" - -#include -#include - -#include "mindspore-lite/minddata/dataset/util/path.h" -#include "mindspore-lite/minddata/dataset/util/status.h" - -namespace mindspore { -namespace dataset { -SogouNewsNode::SogouNewsNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, - ShuffleMode shuffle, int32_t num_shards, int32_t shard_id, - const std::shared_ptr &cache) - : NonMappableSourceNode(std::move(cache)), - dataset_dir_(dataset_dir), - num_samples_(num_samples), - shuffle_(shuffle), - num_shards_(num_shards), - shard_id_(shard_id), - usage_(usage), - sogou_news_files_list_(WalkAllFiles(usage, dataset_dir)) { - // Update the num_shards_ in global context. this number is only used for now by auto_num_worker_pass. - // User discretion is advised. Auto_num_worker_pass is currently an experimental feature which can still work - // if the num_shards_ isn't 100% correct. The reason behind is for now, PreBuildSampler doesn't offer a way to - // return num_shards. Once PreBuildSampler is phased out, this can be cleaned up. - GlobalContext::config_manager()->set_num_shards_for_auto_num_workers(num_shards_); -} - -std::shared_ptr SogouNewsNode::Copy() { - auto node = - std::make_shared(dataset_dir_, usage_, num_samples_, shuffle_, num_shards_, shard_id_, cache_); - (void)node->SetNumWorkers(num_workers_); - (void)node->SetConnectorQueueSize(connector_que_size_); - return node; -} - -void SogouNewsNode::Print(std::ostream &out) const { - out << (Name() + "(cache: " + ((cache_ != nullptr) ? "true" : "false") + - ", num_shards: " + std::to_string(num_shards_) + ", shard_id: " + std::to_string(shard_id_) + ")"); -} - -Status SogouNewsNode::ValidateParams() { - RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); - RETURN_IF_NOT_OK(ValidateDatasetDirParam("SogouNewsNode", dataset_dir_)); - RETURN_IF_NOT_OK(ValidateStringValue("SogouNewsNode", usage_, {"train", "test", "all"})); - RETURN_IF_NOT_OK(ValidateEnum("SogouNewsNode", "ShuffleMode", shuffle_, - {ShuffleMode::kFalse, ShuffleMode::kFiles, ShuffleMode::kGlobal})); - if (num_samples_ < 0) { - std::string err_msg = "SogouNewsNode: Invalid number of samples: " + std::to_string(num_samples_); - MS_LOG(ERROR) << err_msg; - LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg); - } - - RETURN_IF_NOT_OK(ValidateDatasetShardParams("SogouNewsNode", num_shards_, shard_id_)); - return Status::OK(); -} - -Status SogouNewsNode::Build(std::vector> *const node_ops) { - bool shuffle_files = (shuffle_ == ShuffleMode::kGlobal || shuffle_ == ShuffleMode::kFiles); - - // Sort the dataset files in a lexicographical order. - std::vector sorted_dataset_files = sogou_news_files_list_; - std::sort(sorted_dataset_files.begin(), sorted_dataset_files.end()); - - std::vector> column_default; - column_default.push_back(std::make_shared>(SogouNewsOp::STRING, "")); - column_default.push_back(std::make_shared>(SogouNewsOp::STRING, "")); - column_default.push_back(std::make_shared>(SogouNewsOp::STRING, "")); - - std::vector column_name = {"index", "title", "content"}; - char field_delim = ','; - auto sogou_news_op = std::make_shared(num_workers_, num_samples_, worker_connector_size_, - connector_que_size_, shuffle_files, num_shards_, shard_id_, - field_delim, column_default, column_name, sogou_news_files_list_); - - RETURN_IF_NOT_OK(sogou_news_op->Init()); - - // If a global shuffle is used for SogouNews, it will inject a shuffle op over the SogouNews. 
- // But, if there is a cache in the tree, we do not need the global shuffle and the shuffle op should not be - // built.This is achieved in the cache transform pass where we call MakeSimpleProducer to reset SogouNews - // shuffle option to false. - if (shuffle_ == ShuffleMode::kGlobal) { - // Inject ShuffleOp. - std::shared_ptr shuffle_op = nullptr; - int64_t num_rows = 0; - - // First, get the number of rows in the dataset. - RETURN_IF_NOT_OK(SogouNewsOp::CountAllFileRows(sogou_news_files_list_, false, &num_rows)); - // Add the shuffle op after this op. - RETURN_IF_NOT_OK( - AddShuffleOp(sorted_dataset_files.size(), num_shards_, num_rows, 0, connector_que_size_, &shuffle_op)); - shuffle_op->SetTotalRepeats(GetTotalRepeats()); - shuffle_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - shuffle_op->Skip(skip_steps_); - node_ops->push_back(shuffle_op); - } - sogou_news_op->SetTotalRepeats(GetTotalRepeats()); - sogou_news_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - node_ops->push_back(sogou_news_op); - return Status::OK(); -} - -Status SogouNewsNode::GetShardId(int32_t *shard_id) { - *shard_id = shard_id_; - return Status::OK(); -} - -Status SogouNewsNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) { - if (dataset_size_ > 0) { - *dataset_size = dataset_size_; - return Status::OK(); - } - - int64_t num_rows, sample_size; - RETURN_IF_NOT_OK(SogouNewsOp::CountAllFileRows(sogou_news_files_list_, false, &num_rows)); - sample_size = num_samples_; - num_rows = static_cast(ceil(num_rows / (1.0 * num_shards_))); - *dataset_size = sample_size > 0 ? std::min(num_rows, sample_size) : num_rows; - dataset_size_ = *dataset_size; - return Status::OK(); -} - -Status SogouNewsNode::to_json(nlohmann::json *out_json) { - nlohmann::json args; - args["num_parallel_workers"] = num_workers_; - args["connector_queue_size"] = connector_que_size_; - args["dataset_dir"] = dataset_dir_; - args["usage"] = usage_; - args["num_samples"] = num_samples_; - args["shuffle"] = shuffle_; - args["num_shards"] = num_shards_; - args["shard_id"] = shard_id_; - if (cache_ != nullptr) { - nlohmann::json cache_args; - RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); - args["cache"] = cache_args; - } - *out_json = args; - return Status::OK(); -} - -Status SogouNewsNode::SetupSamplerForCache(std::shared_ptr *sampler) { - *sampler = SelectSampler(num_samples_, shuffle_, num_shards_, shard_id_); - return Status::OK(); -} - -Status SogouNewsNode::MakeSimpleProducer() { - shard_id_ = 0; - num_shards_ = 1; - shuffle_ = ShuffleMode::kFalse; - num_samples_ = 0; - return Status::OK(); -} - -std::vector SogouNewsNode::WalkAllFiles(const std::string &usage, const std::string &dataset_dir) { - std::vector sogou_news_files_list; - Path train_prefix("train.csv"); - Path test_prefix("test.csv"); - Path dir(dataset_dir); - - if (usage == "train") { - Path temp_path = dir / train_prefix; - sogou_news_files_list.push_back(temp_path.ToString()); - } else if (usage == "test") { - Path temp_path = dir / test_prefix; - sogou_news_files_list.push_back(temp_path.ToString()); - } else { - Path temp_path = dir / train_prefix; - if (temp_path.Exists()) { - sogou_news_files_list.push_back(temp_path.ToString()); - } - Path temp_path1 = dir / test_prefix; - if (temp_path1.Exists()) { - sogou_news_files_list.push_back(temp_path1.ToString()); - } - } - return sogou_news_files_list; -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021-2023 Huawei Technologies Co., Ltd + * + * Licensed 
under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sogou_news_node.h" + +#include +#include + +#include "mindspore-lite/minddata/dataset/util/path.h" +#include "mindspore-lite/minddata/dataset/util/status.h" + +namespace mindspore::lite { +namespace dataset { +SogouNewsNode::SogouNewsNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, + ShuffleMode shuffle, int32_t num_shards, int32_t shard_id, + const std::shared_ptr &cache) + : NonMappableSourceNode(std::move(cache)), + dataset_dir_(dataset_dir), + num_samples_(num_samples), + shuffle_(shuffle), + num_shards_(num_shards), + shard_id_(shard_id), + usage_(usage), + sogou_news_files_list_(WalkAllFiles(usage, dataset_dir)) { + // Update the num_shards_ in global context. this number is only used for now by auto_num_worker_pass. + // User discretion is advised. Auto_num_worker_pass is currently an experimental feature which can still work + // if the num_shards_ isn't 100% correct. The reason behind is for now, PreBuildSampler doesn't offer a way to + // return num_shards. Once PreBuildSampler is phased out, this can be cleaned up. + GlobalContext::config_manager()->set_num_shards_for_auto_num_workers(num_shards_); +} + +std::shared_ptr SogouNewsNode::Copy() { + auto node = + std::make_shared(dataset_dir_, usage_, num_samples_, shuffle_, num_shards_, shard_id_, cache_); + (void)node->SetNumWorkers(num_workers_); + (void)node->SetConnectorQueueSize(connector_que_size_); + return node; +} + +void SogouNewsNode::Print(std::ostream &out) const { + out << (Name() + "(cache: " + ((cache_ != nullptr) ? "true" : "false") + + ", num_shards: " + std::to_string(num_shards_) + ", shard_id: " + std::to_string(shard_id_) + ")"); +} + +Status SogouNewsNode::ValidateParams() { + RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); + RETURN_IF_NOT_OK(ValidateDatasetDirParam("SogouNewsNode", dataset_dir_)); + RETURN_IF_NOT_OK(ValidateStringValue("SogouNewsNode", usage_, {"train", "test", "all"})); + RETURN_IF_NOT_OK(ValidateEnum("SogouNewsNode", "ShuffleMode", shuffle_, + {ShuffleMode::kFalse, ShuffleMode::kFiles, ShuffleMode::kGlobal})); + if (num_samples_ < 0) { + std::string err_msg = "SogouNewsNode: Invalid number of samples: " + std::to_string(num_samples_); + MS_LOG(ERROR) << err_msg; + LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg); + } + + RETURN_IF_NOT_OK(ValidateDatasetShardParams("SogouNewsNode", num_shards_, shard_id_)); + return Status::OK(); +} + +Status SogouNewsNode::Build(std::vector> *const node_ops) { + bool shuffle_files = (shuffle_ == ShuffleMode::kGlobal || shuffle_ == ShuffleMode::kFiles); + + // Sort the dataset files in a lexicographical order. 
+ std::vector sorted_dataset_files = sogou_news_files_list_; + std::sort(sorted_dataset_files.begin(), sorted_dataset_files.end()); + + std::vector> column_default; + column_default.push_back(std::make_shared>(SogouNewsOp::STRING, "")); + column_default.push_back(std::make_shared>(SogouNewsOp::STRING, "")); + column_default.push_back(std::make_shared>(SogouNewsOp::STRING, "")); + + std::vector column_name = {"index", "title", "content"}; + char field_delim = ','; + auto sogou_news_op = std::make_shared(num_workers_, num_samples_, worker_connector_size_, + connector_que_size_, shuffle_files, num_shards_, shard_id_, + field_delim, column_default, column_name, sogou_news_files_list_); + + RETURN_IF_NOT_OK(sogou_news_op->Init()); + + // If a global shuffle is used for SogouNews, it will inject a shuffle op over the SogouNews. + // But, if there is a cache in the tree, we do not need the global shuffle and the shuffle op should not be + // built.This is achieved in the cache transform pass where we call MakeSimpleProducer to reset SogouNews + // shuffle option to false. + if (shuffle_ == ShuffleMode::kGlobal) { + // Inject ShuffleOp. + std::shared_ptr shuffle_op = nullptr; + int64_t num_rows = 0; + + // First, get the number of rows in the dataset. + RETURN_IF_NOT_OK(SogouNewsOp::CountAllFileRows(sogou_news_files_list_, false, &num_rows)); + // Add the shuffle op after this op. + RETURN_IF_NOT_OK( + AddShuffleOp(sorted_dataset_files.size(), num_shards_, num_rows, 0, connector_que_size_, &shuffle_op)); + shuffle_op->SetTotalRepeats(GetTotalRepeats()); + shuffle_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); + shuffle_op->Skip(skip_steps_); + node_ops->push_back(shuffle_op); + } + sogou_news_op->SetTotalRepeats(GetTotalRepeats()); + sogou_news_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); + node_ops->push_back(sogou_news_op); + return Status::OK(); +} + +Status SogouNewsNode::GetShardId(int32_t *shard_id) { + *shard_id = shard_id_; + return Status::OK(); +} + +Status SogouNewsNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) { + if (dataset_size_ > 0) { + *dataset_size = dataset_size_; + return Status::OK(); + } + + int64_t num_rows, sample_size; + RETURN_IF_NOT_OK(SogouNewsOp::CountAllFileRows(sogou_news_files_list_, false, &num_rows)); + sample_size = num_samples_; + num_rows = static_cast(ceil(num_rows / (1.0 * num_shards_))); + *dataset_size = sample_size > 0 ? 
std::min(num_rows, sample_size) : num_rows; + dataset_size_ = *dataset_size; + return Status::OK(); +} + +Status SogouNewsNode::to_json(nlohmann::json *out_json) { + nlohmann::json args; + args["num_parallel_workers"] = num_workers_; + args["connector_queue_size"] = connector_que_size_; + args["dataset_dir"] = dataset_dir_; + args["usage"] = usage_; + args["num_samples"] = num_samples_; + args["shuffle"] = shuffle_; + args["num_shards"] = num_shards_; + args["shard_id"] = shard_id_; + if (cache_ != nullptr) { + nlohmann::json cache_args; + RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); + args["cache"] = cache_args; + } + *out_json = args; + return Status::OK(); +} + +Status SogouNewsNode::SetupSamplerForCache(std::shared_ptr *sampler) { + *sampler = SelectSampler(num_samples_, shuffle_, num_shards_, shard_id_); + return Status::OK(); +} + +Status SogouNewsNode::MakeSimpleProducer() { + shard_id_ = 0; + num_shards_ = 1; + shuffle_ = ShuffleMode::kFalse; + num_samples_ = 0; + return Status::OK(); +} + +std::vector SogouNewsNode::WalkAllFiles(const std::string &usage, const std::string &dataset_dir) { + std::vector sogou_news_files_list; + Path train_prefix("train.csv"); + Path test_prefix("test.csv"); + Path dir(dataset_dir); + + if (usage == "train") { + Path temp_path = dir / train_prefix; + sogou_news_files_list.push_back(temp_path.ToString()); + } else if (usage == "test") { + Path temp_path = dir / test_prefix; + sogou_news_files_list.push_back(temp_path.ToString()); + } else { + Path temp_path = dir / train_prefix; + if (temp_path.Exists()) { + sogou_news_files_list.push_back(temp_path.ToString()); + } + Path temp_path1 = dir / test_prefix; + if (temp_path1.Exists()) { + sogou_news_files_list.push_back(temp_path1.ToString()); + } + } + return sogou_news_files_list; +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sogou_news_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sogou_news_node.h index 99abfcd6..7dcdc53c 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sogou_news_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sogou_news_node.h @@ -1,135 +1,135 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SOGOU_NEWS_NODE_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SOGOU_NEWS_NODE_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sogou_news_op.h" -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" - -namespace mindspore { -namespace dataset { -/// \class SogouNewsNode -/// \brief A Node derived class to represent SogouNews Node. -class SogouNewsNode : public NonMappableSourceNode { - public: - /// \brief Constructor of SogouNewsNode. 
- /// \param[in] dataset_dir Path to the root directory that contains the dataset. - /// \param[in] usage Part of dataset of SogouNews, can be "train", "test" or "all" data. - /// \param[in] num_samples The number of samples to be included in the dataset. - /// \param[in] shuffle The mode for shuffling data every epoch. - /// Can be any of: - /// ShuffleMode::kFalse - No shuffling is performed. - /// ShuffleMode::kFiles - Shuffle files only. - /// ShuffleMode::kGlobal - Shuffle both the files and samples. - /// \param[in] num_shards Number of shards that the dataset should be divided into. - /// \param[in] shard_id The shard ID within num_shards. This argument should be - /// specified only when num_shards is also specified. - /// \param[in] cache Tensor cache to use. - SogouNewsNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, ShuffleMode shuffle, - int32_t num_shards, int32_t shard_id, const std::shared_ptr &cache); - - /// \brief Destructor. - ~SogouNewsNode() override = default; - - /// \brief Node name getter. - /// \return Name of the current node. - std::string Name() const override { return kSogouNewsNode; } - - /// \brief Print the description. - /// \param[out] out The output stream to write output to. - void Print(std::ostream &out) const override; - - /// \brief Copy the node to a new object. - /// \return A shared pointer to the new copy. - std::shared_ptr Copy() override; - - /// \brief A base class override function to create the required runtime dataset op objects for this class. - /// \param node_ops A vector containing shared pointer to the Dataset Ops that this object will create. - /// \return Status Status::OK() if build successfully. - Status Build(std::vector> *const node_ops) override; - - /// \brief Parameters validation. - /// \return Status Status::OK() if all the parameters are valid. - Status ValidateParams() override; - - /// \brief Get the shard id of node. - /// \param[in] shard_id The shard id. - /// \return Status Status::OK() if get shard id successfully. - Status GetShardId(int32_t *shard_id) override; - - /// \brief Base-class override for GetDatasetSize. - /// \param[in] size_getter Shared pointer to DatasetSizeGetter. - /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting. - /// dataset size at the expense of accuracy. - /// \param[out] dataset_size The size of the dataset. - /// \return Status of the function. - Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) override; - - /// \brief Getter functions. - const std::string &DatasetDir() const { return dataset_dir_; } - const std::string &Usage() const { return usage_; } - int64_t NumSamples() const { return num_samples_; } - ShuffleMode Shuffle() const { return shuffle_; } - int32_t NumShards() const { return num_shards_; } - int32_t ShardId() const { return shard_id_; } - - /// \brief Get the arguments of node. - /// \param[out] out_json JSON string of all attributes. - /// \return Status of the function. - Status to_json(nlohmann::json *out_json) override; - - /// \brief SogouNews by itself is a non-mappable dataset that does not support sampling. - /// However, if a cache operator is injected at some other place higher in the tree, that cache can - /// inherit this sampler from the leaf, providing sampling support from the caching layer. - /// That is why we setup the sampler for a leaf node that does not use sampling. 
-  ///     Note: This function is common among NonMappableSourceNode and should be promoted to its parent class.
-  /// \param[in] sampler The sampler to setup.
-  /// \return Status of the function.
-  Status SetupSamplerForCache(std::shared_ptr<SamplerObj> *sampler) override;
-
-  /// \brief If a cache has been added into the ascendant tree over this clue node, then the cache will be executing
-  ///     a sampler for fetching the data. As such, any options in the clue node need to be reset to its defaults so.
-  ///     that this clue node will produce the full set of data into the cache.
-  ///     Note: This function is common among NonMappableSourceNode and should be promoted to its parent class.
-  /// \return Status of the function.
-  Status MakeSimpleProducer() override;
-
-  /// \brief Generate a list of read file names according to usage.
-  /// \param[in] usage Part of dataset of SogouNews.
-  /// \param[in] dataset_dir Path to the root directory that contains the dataset.
-  /// \return std::vector<std::string> A list of read file names.
-  std::vector<std::string> WalkAllFiles(const std::string &usage, const std::string &dataset_dir);
-
- private:
-  std::string dataset_dir_;
-  std::string usage_;
-  std::vector> column_defaults_;
-  std::vector<std::string> column_names_;
-  int64_t num_samples_;
-  ShuffleMode shuffle_;
-  int32_t num_shards_;
-  int32_t shard_id_;
-  std::vector<std::string> sogou_news_files_list_;
-};
-} // namespace dataset
-} // namespace mindspore
-#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SOGOU_NEWS_NODE_H_
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SOGOU_NEWS_NODE_H_
+#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SOGOU_NEWS_NODE_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "mindspore-lite/minddata/dataset/engine/datasetops/source/sogou_news_op.h"
+#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h"
+
+namespace mindspore::lite {
+namespace dataset {
+/// \class SogouNewsNode
+/// \brief A Node derived class to represent SogouNews Node.
+class SogouNewsNode : public NonMappableSourceNode {
+ public:
+  /// \brief Constructor of SogouNewsNode.
+  /// \param[in] dataset_dir Path to the root directory that contains the dataset.
+  /// \param[in] usage Part of dataset of SogouNews, can be "train", "test" or "all" data.
+  /// \param[in] num_samples The number of samples to be included in the dataset.
+  /// \param[in] shuffle The mode for shuffling data every epoch.
+  ///     Can be any of:
+  ///     ShuffleMode::kFalse - No shuffling is performed.
+  ///     ShuffleMode::kFiles - Shuffle files only.
+  ///     ShuffleMode::kGlobal - Shuffle both the files and samples.
+  /// \param[in] num_shards Number of shards that the dataset should be divided into.
+  /// \param[in] shard_id The shard ID within num_shards. This argument should be
+  ///     specified only when num_shards is also specified.
+  /// \param[in] cache Tensor cache to use.
+  SogouNewsNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, ShuffleMode shuffle,
+                int32_t num_shards, int32_t shard_id, const std::shared_ptr<DatasetCache> &cache);
+
+  /// \brief Destructor.
+  ~SogouNewsNode() override = default;
+
+  /// \brief Node name getter.
+  /// \return Name of the current node.
+  std::string Name() const override { return kSogouNewsNode; }
+
+  /// \brief Print the description.
+  /// \param[out] out The output stream to write output to.
+  void Print(std::ostream &out) const override;
+
+  /// \brief Copy the node to a new object.
+  /// \return A shared pointer to the new copy.
+  std::shared_ptr<DatasetNode> Copy() override;
+
+  /// \brief A base class override function to create the required runtime dataset op objects for this class.
+  /// \param node_ops A vector containing shared pointer to the Dataset Ops that this object will create.
+  /// \return Status Status::OK() if build successfully.
+  Status Build(std::vector<std::shared_ptr<DatasetOp>> *const node_ops) override;
+
+  /// \brief Parameters validation.
+  /// \return Status Status::OK() if all the parameters are valid.
+  Status ValidateParams() override;
+
+  /// \brief Get the shard id of node.
+  /// \param[in] shard_id The shard id.
+  /// \return Status Status::OK() if get shard id successfully.
+  Status GetShardId(int32_t *shard_id) override;
+
+  /// \brief Base-class override for GetDatasetSize.
+  /// \param[in] size_getter Shared pointer to DatasetSizeGetter.
+  /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting
+  ///     dataset size at the expense of accuracy.
+  /// \param[out] dataset_size The size of the dataset.
+  /// \return Status of the function.
+  Status GetDatasetSize(const std::shared_ptr<DatasetSizeGetter> &size_getter, bool estimate,
+                        int64_t *dataset_size) override;
+
+  /// \brief Getter functions.
+  const std::string &DatasetDir() const { return dataset_dir_; }
+  const std::string &Usage() const { return usage_; }
+  int64_t NumSamples() const { return num_samples_; }
+  ShuffleMode Shuffle() const { return shuffle_; }
+  int32_t NumShards() const { return num_shards_; }
+  int32_t ShardId() const { return shard_id_; }
+
+  /// \brief Get the arguments of node.
+  /// \param[out] out_json JSON string of all attributes.
+  /// \return Status of the function.
+  Status to_json(nlohmann::json *out_json) override;
+
+  /// \brief SogouNews by itself is a non-mappable dataset that does not support sampling.
+  ///     However, if a cache operator is injected at some other place higher in the tree, that cache can
+  ///     inherit this sampler from the leaf, providing sampling support from the caching layer.
+  ///     That is why we set up the sampler for a leaf node that does not use sampling.
+  ///     Note: This function is common among NonMappableSourceNode and should be promoted to its parent class.
+  /// \param[in] sampler The sampler to setup.
+  /// \return Status of the function.
+  Status SetupSamplerForCache(std::shared_ptr<SamplerObj> *sampler) override;
+
+  /// \brief If a cache has been added into the ascendant tree over this SogouNews node, then the cache will be executing
+  ///     a sampler for fetching the data. As such, any options in the SogouNews node need to be reset to its defaults so
+  ///     that this SogouNews node will produce the full set of data into the cache.
+  ///     Note: This function is common among NonMappableSourceNode and should be promoted to its parent class.
+  /// \return Status of the function.
+ Status MakeSimpleProducer() override; + + /// \brief Generate a list of read file names according to usage. + /// \param[in] usage Part of dataset of SogouNews. + /// \param[in] dataset_dir Path to the root directory that contains the dataset. + /// \return std::vector A list of read file names. + std::vector WalkAllFiles(const std::string &usage, const std::string &dataset_dir); + + private: + std::string dataset_dir_; + std::string usage_; + std::vector> column_defaults_; + std::vector column_names_; + int64_t num_samples_; + ShuffleMode shuffle_; + int32_t num_shards_; + int32_t shard_id_; + std::vector sogou_news_files_list_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SOGOU_NEWS_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/speech_commands_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/speech_commands_node.cc index bec0ea59..9890a807 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/speech_commands_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/speech_commands_node.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/speech_commands_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { SpeechCommandsNode::SpeechCommandsNode(const std::string &dataset_dir, const std::string &usage, std::shared_ptr sampler, std::shared_ptr cache) @@ -117,4 +117,4 @@ Status SpeechCommandsNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/speech_commands_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/speech_commands_node.h index ac40447a..b3004f3b 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/speech_commands_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/speech_commands_node.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SpeechCommandsNode : public MappableSourceNode { public: @@ -93,5 +93,5 @@ class SpeechCommandsNode : public MappableSourceNode { std::shared_ptr sampler_; }; // class SpeechCommandsNode } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SPEECH_COMMANDS_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/squad_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/squad_node.cc index 12ea58bd..02f5509e 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/squad_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/squad_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/squad_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for SQuADNode. 
SQuADNode::SQuADNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, ShuffleMode shuffle, @@ -173,4 +173,4 @@ Status SQuADNode::MakeSimpleProducer() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/squad_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/squad_node.h index 0c4f5cc6..819cd103 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/squad_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/squad_node.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class SQuADNode. /// \brief A Dataset derived class to represent SQuAD dataset. @@ -110,5 +110,5 @@ class SQuADNode : public NonMappableSourceNode { int32_t shard_id_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SQUAD_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sst2_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sst2_node.cc index 55ac8ae3..a1b9a377 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sst2_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sst2_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/util/path.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { SST2Node::SST2Node(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, ShuffleMode shuffle, int32_t num_shards, int32_t shard_id, std::shared_ptr cache) @@ -199,4 +199,4 @@ Status SST2Node::MakeSimpleProducer() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sst2_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sst2_node.h index ea24d9f3..d84f2efe 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sst2_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sst2_node.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sst2_op.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SST2Node : public NonMappableSourceNode { public: @@ -116,5 +116,5 @@ class SST2Node : public NonMappableSourceNode { int32_t shard_id_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SST2_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/stl10_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/stl10_node.cc index 8ad8bd7b..05987b96 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/stl10_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/stl10_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/stl10_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { 
STL10Node::STL10Node(const std::string &dataset_dir, const std::string &usage, std::shared_ptr sampler, std::shared_ptr cache) @@ -118,4 +118,4 @@ Status STL10Node::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/stl10_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/stl10_node.h index b0aaf8c9..6040e2e0 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/stl10_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/stl10_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class STL10Node : public MappableSourceNode { public: @@ -92,5 +92,5 @@ class STL10Node : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_STL10_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sun397_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sun397_node.cc index fc3e2a81..2a40569a 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sun397_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sun397_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { SUN397Node::SUN397Node(const std::string &dataset_dir, bool decode, const std::shared_ptr &sampler, std::shared_ptr cache) @@ -111,4 +111,4 @@ Status SUN397Node::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sun397_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sun397_node.h index 76d80660..97fac980 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sun397_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/sun397_node.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/sun397_op.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class SUN397Node. /// \brief A Dataset derived class to represent SUN397 dataset. @@ -99,5 +99,5 @@ class SUN397Node : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_SUN397_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tedlium_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tedlium_node.cc index 94775a7c..4e739584 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tedlium_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tedlium_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/tedlium_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for TedliumNode. 
TedliumNode::TedliumNode(const std::string &dataset_dir, const std::string &release, const std::string &usage, @@ -151,4 +151,4 @@ Status TedliumNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tedlium_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tedlium_node.h index 2d462265..83ec0ff4 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tedlium_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tedlium_node.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class TedliumNode : public MappableSourceNode { public: @@ -106,5 +106,5 @@ class TedliumNode : public MappableSourceNode { std::shared_ptr sampler_; }; // class TedliumNode } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_TEDLIUM_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/text_file_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/text_file_node.cc index 6dc87a79..848ba05b 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/text_file_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/text_file_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/text_file_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for TextFileNode @@ -191,4 +191,4 @@ Status TextFileNode::MakeSimpleProducer() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/text_file_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/text_file_node.h index 09ce9de1..1648035f 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/text_file_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/text_file_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class TextFileNode /// \brief A Dataset derived class to represent TextFile dataset @@ -112,5 +112,5 @@ class TextFileNode : public NonMappableSourceNode { ShuffleMode shuffle_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_TEXT_FILE_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tf_record_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tf_record_node.cc index f1d1c028..4c218a87 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tf_record_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tf_record_node.cc @@ -25,7 +25,7 @@ #include "utils/file_utils.h" #include "utils/system/crc32c.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { std::unordered_set TFRecordNode::large_files_ = {}; const int64_t kTFRecordFileLimit = 0x140000000; @@ -420,4 +420,4 @@ Status TFRecordNode::AcceptAfter(IRNodePass *const p, 
bool *const modified) { return p->VisitAfter(shared_from_base(), modified); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tf_record_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tf_record_node.h index 68008d8d..45628274 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tf_record_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/tf_record_node.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/tf_reader_op.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class TFRecordNode /// \brief A Dataset derived class to represent TFRecord dataset @@ -204,5 +204,5 @@ class TFRecordNode : public NonMappableSourceNode { static std::unordered_set large_files_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_TF_RECORD_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/udpos_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/udpos_node.cc index 6fe42a84..1e0c042d 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/udpos_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/udpos_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/udpos_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for UDPOSNode. UDPOSNode::UDPOSNode(const std::string &dataset_dir, const std::string &usage, int32_t num_samples, ShuffleMode shuffle, @@ -205,4 +205,4 @@ std::vector UDPOSNode::WalkAllFiles(const std::string &usage, const return udpos_files_list; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/udpos_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/udpos_node.h index 217790da..f5c8ef1e 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/udpos_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/udpos_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class UDPOSNode. /// \brief A Dataset derived class to represent UDPOS dataset. @@ -116,5 +116,5 @@ class UDPOSNode : public NonMappableSourceNode { std::vector udpos_files_list_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_UDPOS_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/usps_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/usps_node.cc index 5691fca5..60c06bd7 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/usps_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/usps_node.cc @@ -1,170 +1,170 @@ -/** - * Copyright 2021-2022 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/usps_node.h" - -#include -#include -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/datasetops/source/usps_op.h" -#include "mindspore-lite/minddata/dataset/util/status.h" - -namespace mindspore { -namespace dataset { -USPSNode::USPSNode(const std::string &dataset_dir, const std::string &usage, int32_t num_samples, ShuffleMode shuffle, - int32_t num_shards, int32_t shard_id, std::shared_ptr cache) - : NonMappableSourceNode(std::move(cache)), - dataset_dir_(dataset_dir), - usage_(usage), - num_samples_(num_samples), - shuffle_(shuffle), - num_shards_(num_shards), - shard_id_(shard_id) { - // Update the num_shards_ in global context. this number is only used for now by auto_num_worker_pass. User discretion - // is advised. Auto_num_worker_pass is currently an experimental feature which can still work if the num_shards_ isn't - // 100% correct. The reason behind is for now, PreBuildSampler doesn't offer a way to return num_shards. Once - // PreBuildSampler is phased out, this can be cleaned up. - GlobalContext::config_manager()->set_num_shards_for_auto_num_workers(num_shards_); -} - -std::shared_ptr USPSNode::Copy() { - auto node = std::make_shared(dataset_dir_, usage_, num_samples_, shuffle_, num_shards_, shard_id_, cache_); - (void)node->SetNumWorkers(num_workers_); - (void)node->SetConnectorQueueSize(connector_que_size_); - return node; -} - -void USPSNode::Print(std::ostream &out) const { - out << (Name() + "(dataset dir:" + dataset_dir_ + ", usage:" + usage_ + - ", num_shards:" + std::to_string(num_shards_) + ", shard_id:" + std::to_string(shard_id_) + - ", num_samples:" + std::to_string(num_samples_) + ")"); -} - -Status USPSNode::ValidateParams() { - RETURN_IF_NOT_OK(DatasetNode::ValidateParams()); - RETURN_IF_NOT_OK(ValidateDatasetDirParam("USPSDataset", dataset_dir_)); - RETURN_IF_NOT_OK(ValidateStringValue("USPSDataset", usage_, {"train", "test", "all"})); - RETURN_IF_NOT_OK(ValidateScalar("USPSDataset", "num_samples", num_samples_, {0}, false)); - RETURN_IF_NOT_OK(ValidateDatasetShardParams("USPSDataset", num_shards_, shard_id_)); - RETURN_IF_NOT_OK(ValidateEnum("USPSDataset", "ShuffleMode", shuffle_, - {ShuffleMode::kFalse, ShuffleMode::kFiles, ShuffleMode::kGlobal})); - return Status::OK(); -} - -Status USPSNode::Build(std::vector> *const node_ops) { - bool shuffle_files = (shuffle_ == ShuffleMode::kGlobal || shuffle_ == ShuffleMode::kFiles); - - // Do internal Schema generation. 
- auto schema = std::make_unique(); - RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kCv, 1))); - TensorShape scalar = TensorShape::CreateScalar(); - RETURN_IF_NOT_OK( - schema->AddColumn(ColDescriptor("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar))); - - auto op = std::make_shared(dataset_dir_, usage_, std::move(schema), num_workers_, worker_connector_size_, - num_samples_, connector_que_size_, shuffle_files, num_shards_, shard_id_); - RETURN_IF_NOT_OK(op->Init()); - - // If a global shuffle is used for USPS, it will inject a shuffle op over the USPS. - // But, if there is a cache in the tree, we do not need the global shuffle and the shuffle op should not be built. - // This is achieved in the cache transform pass where we call MakeSimpleProducer to reset USPS's shuffle - // option to false. - if (shuffle_ == ShuffleMode::kGlobal) { - // Inject ShuffleOp - std::shared_ptr shuffle_op = nullptr; - int64_t num_rows = 0; - - // First, get the number of rows in the dataset - RETURN_IF_NOT_OK(USPSOp::CountTotalRows(dataset_dir_, usage_, &num_rows)); - - // Add the shuffle op after this op - RETURN_IF_NOT_OK(AddShuffleOp(op->FileNames().size(), num_shards_, num_rows, 0, connector_que_size_, &shuffle_op)); - shuffle_op->SetTotalRepeats(GetTotalRepeats()); - shuffle_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - shuffle_op->Skip(skip_steps_); - node_ops->push_back(shuffle_op); - } - op->SetTotalRepeats(GetTotalRepeats()); - op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch()); - node_ops->push_back(op); - return Status::OK(); -} - -// Get the shard id of node -Status USPSNode::GetShardId(int32_t *shard_id) { - *shard_id = shard_id_; - return Status::OK(); -} - -// Get Dataset size -Status USPSNode::GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) { - if (dataset_size_ > 0) { - *dataset_size = dataset_size_; - return Status::OK(); - } - int64_t num_rows, sample_size = num_samples_; - RETURN_IF_NOT_OK(USPSOp::CountTotalRows(dataset_dir_, usage_, &num_rows)); - num_rows = static_cast(ceil(num_rows / (1.0 * num_shards_))); - *dataset_size = sample_size > 0 ? std::min(num_rows, sample_size) : num_rows; - dataset_size_ = *dataset_size; - return Status::OK(); -} - -Status USPSNode::to_json(nlohmann::json *out_json) { - nlohmann::json args; - args["num_parallel_workers"] = num_workers_; - args["connector_queue_size"] = connector_que_size_; - args["dataset_dir"] = dataset_dir_; - args["usage"] = usage_; - args["num_samples"] = num_samples_; - args["shuffle"] = shuffle_; - args["num_shards"] = num_shards_; - args["shard_id"] = shard_id_; - if (cache_ != nullptr) { - nlohmann::json cache_args; - RETURN_IF_NOT_OK(cache_->to_json(&cache_args)); - args["cache"] = cache_args; - } - *out_json = args; - return Status::OK(); -} - -// Note: The following two functions are common among NonMappableSourceNode and should be promoted to its parent class. -// USPS by itself is a non-mappable dataset that does not support sampling. -// However, if a cache operator is injected at some other place higher in the tree, that cache can -// inherit this sampler from the leaf, providing sampling support from the caching layer. -// That is why we setup the sampler for a leaf node that does not use sampling. 
-Status USPSNode::SetupSamplerForCache(std::shared_ptr *sampler) { - *sampler = SelectSampler(num_samples_, shuffle_, num_shards_, shard_id_); - return Status::OK(); -} - -// If a cache has been added into the ascendant tree over this USPS node, then the cache will be executing -// a sampler for fetching the data. As such, any options in the USPS node need to be reset to its defaults so -// that this USPS node will produce the full set of data into the cache. -Status USPSNode::MakeSimpleProducer() { - shard_id_ = 0; - num_shards_ = 1; - shuffle_ = ShuffleMode::kFalse; - num_samples_ = 0; - return Status::OK(); -} -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2021-2022 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/usps_node.h" + +#include +#include +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/datasetops/source/usps_op.h" +#include "mindspore-lite/minddata/dataset/util/status.h" + +namespace mindspore::lite { +namespace dataset { +USPSNode::USPSNode(const std::string &dataset_dir, const std::string &usage, int32_t num_samples, ShuffleMode shuffle, + int32_t num_shards, int32_t shard_id, std::shared_ptr cache) + : NonMappableSourceNode(std::move(cache)), + dataset_dir_(dataset_dir), + usage_(usage), + num_samples_(num_samples), + shuffle_(shuffle), + num_shards_(num_shards), + shard_id_(shard_id) { + // Update the num_shards_ in global context. this number is only used for now by auto_num_worker_pass. User discretion + // is advised. Auto_num_worker_pass is currently an experimental feature which can still work if the num_shards_ isn't + // 100% correct. The reason behind is for now, PreBuildSampler doesn't offer a way to return num_shards. Once + // PreBuildSampler is phased out, this can be cleaned up. 
+  GlobalContext::config_manager()->set_num_shards_for_auto_num_workers(num_shards_);
+}
+
+std::shared_ptr<DatasetNode> USPSNode::Copy() {
+  auto node = std::make_shared<USPSNode>(dataset_dir_, usage_, num_samples_, shuffle_, num_shards_, shard_id_, cache_);
+  (void)node->SetNumWorkers(num_workers_);
+  (void)node->SetConnectorQueueSize(connector_que_size_);
+  return node;
+}
+
+void USPSNode::Print(std::ostream &out) const {
+  out << (Name() + "(dataset dir:" + dataset_dir_ + ", usage:" + usage_ +
+          ", num_shards:" + std::to_string(num_shards_) + ", shard_id:" + std::to_string(shard_id_) +
+          ", num_samples:" + std::to_string(num_samples_) + ")");
+}
+
+Status USPSNode::ValidateParams() {
+  RETURN_IF_NOT_OK(DatasetNode::ValidateParams());
+  RETURN_IF_NOT_OK(ValidateDatasetDirParam("USPSDataset", dataset_dir_));
+  RETURN_IF_NOT_OK(ValidateStringValue("USPSDataset", usage_, {"train", "test", "all"}));
+  RETURN_IF_NOT_OK(ValidateScalar("USPSDataset", "num_samples", num_samples_, {0}, false));
+  RETURN_IF_NOT_OK(ValidateDatasetShardParams("USPSDataset", num_shards_, shard_id_));
+  RETURN_IF_NOT_OK(ValidateEnum("USPSDataset", "ShuffleMode", shuffle_,
+                                {ShuffleMode::kFalse, ShuffleMode::kFiles, ShuffleMode::kGlobal}));
+  return Status::OK();
+}
+
+Status USPSNode::Build(std::vector<std::shared_ptr<DatasetOp>> *const node_ops) {
+  bool shuffle_files = (shuffle_ == ShuffleMode::kGlobal || shuffle_ == ShuffleMode::kFiles);
+
+  // Do internal Schema generation.
+  auto schema = std::make_unique<DataSchema>();
+  RETURN_IF_NOT_OK(schema->AddColumn(ColDescriptor("image", DataType(DataType::DE_UINT8), TensorImpl::kCv, 1)));
+  TensorShape scalar = TensorShape::CreateScalar();
+  RETURN_IF_NOT_OK(
+    schema->AddColumn(ColDescriptor("label", DataType(DataType::DE_UINT32), TensorImpl::kFlexible, 0, &scalar)));
+
+  auto op = std::make_shared<USPSOp>(dataset_dir_, usage_, std::move(schema), num_workers_, worker_connector_size_,
+                                     num_samples_, connector_que_size_, shuffle_files, num_shards_, shard_id_);
+  RETURN_IF_NOT_OK(op->Init());
+
+  // If a global shuffle is used for USPS, it will inject a shuffle op over the USPS.
+  // But, if there is a cache in the tree, we do not need the global shuffle and the shuffle op should not be built.
+  // This is achieved in the cache transform pass where we call MakeSimpleProducer to reset USPS's shuffle
+  // option to false.
+  if (shuffle_ == ShuffleMode::kGlobal) {
+    // Inject ShuffleOp
+    std::shared_ptr<ShuffleOp> shuffle_op = nullptr;
+    int64_t num_rows = 0;
+
+    // First, get the number of rows in the dataset
+    RETURN_IF_NOT_OK(USPSOp::CountTotalRows(dataset_dir_, usage_, &num_rows));
+
+    // Add the shuffle op after this op
+    RETURN_IF_NOT_OK(AddShuffleOp(op->FileNames().size(), num_shards_, num_rows, 0, connector_que_size_, &shuffle_op));
+    shuffle_op->SetTotalRepeats(GetTotalRepeats());
+    shuffle_op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch());
+    shuffle_op->Skip(skip_steps_);
+    node_ops->push_back(shuffle_op);
+  }
+  op->SetTotalRepeats(GetTotalRepeats());
+  op->SetNumRepeatsPerEpoch(GetNumRepeatsPerEpoch());
+  node_ops->push_back(op);
+  return Status::OK();
+}
+
+// Get the shard id of node
+Status USPSNode::GetShardId(int32_t *shard_id) {
+  *shard_id = shard_id_;
+  return Status::OK();
+}
+
+// Get Dataset size
+Status USPSNode::GetDatasetSize(const std::shared_ptr<DatasetSizeGetter> &size_getter, bool estimate,
+                                int64_t *dataset_size) {
+  if (dataset_size_ > 0) {
+    *dataset_size = dataset_size_;
+    return Status::OK();
+  }
+  int64_t num_rows, sample_size = num_samples_;
+  RETURN_IF_NOT_OK(USPSOp::CountTotalRows(dataset_dir_, usage_, &num_rows));
+  num_rows = static_cast<int64_t>(ceil(num_rows / (1.0 * num_shards_)));
+  *dataset_size = sample_size > 0 ? std::min(num_rows, sample_size) : num_rows;
+  dataset_size_ = *dataset_size;
+  return Status::OK();
+}
+
+Status USPSNode::to_json(nlohmann::json *out_json) {
+  nlohmann::json args;
+  args["num_parallel_workers"] = num_workers_;
+  args["connector_queue_size"] = connector_que_size_;
+  args["dataset_dir"] = dataset_dir_;
+  args["usage"] = usage_;
+  args["num_samples"] = num_samples_;
+  args["shuffle"] = shuffle_;
+  args["num_shards"] = num_shards_;
+  args["shard_id"] = shard_id_;
+  if (cache_ != nullptr) {
+    nlohmann::json cache_args;
+    RETURN_IF_NOT_OK(cache_->to_json(&cache_args));
+    args["cache"] = cache_args;
+  }
+  *out_json = args;
+  return Status::OK();
+}
+
+// Note: The following two functions are common among NonMappableSourceNode and should be promoted to its parent class.
+// USPS by itself is a non-mappable dataset that does not support sampling.
+// However, if a cache operator is injected at some other place higher in the tree, that cache can
+// inherit this sampler from the leaf, providing sampling support from the caching layer.
+// That is why we set up the sampler for a leaf node that does not use sampling.
+Status USPSNode::SetupSamplerForCache(std::shared_ptr<SamplerObj> *sampler) {
+  *sampler = SelectSampler(num_samples_, shuffle_, num_shards_, shard_id_);
+  return Status::OK();
+}
+
+// If a cache has been added into the ascendant tree over this USPS node, then the cache will be executing
+// a sampler for fetching the data. As such, any options in the USPS node need to be reset to its defaults so
+// that this USPS node will produce the full set of data into the cache.
+Status USPSNode::MakeSimpleProducer() { + shard_id_ = 0; + num_shards_ = 1; + shuffle_ = ShuffleMode::kFalse; + num_samples_ = 0; + return Status::OK(); +} +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/usps_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/usps_node.h index 56e1152c..45019eaa 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/usps_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/usps_node.h @@ -1,120 +1,120 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_USPS_NODE_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_USPS_NODE_H_ - -#include -#include -#include - -#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" - -namespace mindspore { -namespace dataset { -class USPSNode : public NonMappableSourceNode { - public: - /// \brief Constructor. - USPSNode(const std::string &dataset_dir, const std::string &usage, int32_t num_samples, ShuffleMode shuffle, - int32_t num_shards, int32_t shard_id, std::shared_ptr cache); - - /// \brief Destructor. - ~USPSNode() override = default; - - /// \brief Node name getter. - /// \return Name of the current node. - std::string Name() const override { return kUSPSNode; } - - /// \brief Print the description. - /// \param out - The output stream to write output to. - void Print(std::ostream &out) const override; - - /// \brief Copy the node to a new object. - /// \return A shared pointer to the new copy. - std::shared_ptr Copy() override; - - /// \brief a base class override function to create the required runtime dataset op objects for this class. - /// \param node_ops - A vector containing shared pointer to the Dataset Ops that this object will create. - /// \return Status Status::OK() if build successfully. - Status Build(std::vector> *const node_ops) override; - - /// \brief Parameters validation. - /// \return Status Status::OK() if all the parameters are valid. - Status ValidateParams() override; - - /// \brief Get the shard id of node. - /// \return Status Status::OK() if get shard id successfully. - Status GetShardId(int32_t *shard_id) override; - - /// \brief Base-class override for GetDatasetSize. - /// \param[in] size_getter Shared pointer to DatasetSizeGetter. - /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting - /// dataset size at the expense of accuracy. - /// \param[out] dataset_size the size of the dataset. - /// \return Status of the function. - Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, - int64_t *dataset_size) override; - - /// \brief Getter functions. - const std::string &DatasetDir() const { return dataset_dir_; } - - /// \brief Getter functions. 
- const std::string &Usage() const { return usage_; } - - /// \brief Getter functions. - int32_t NumSamples() const { return num_samples_; } - - /// \brief Getter functions. - int32_t NumShards() const { return num_shards_; } - - /// \brief Getter functions. - int32_t ShardId() const { return shard_id_; } - - /// \brief Getter functions. - ShuffleMode Shuffle() const { return shuffle_; } - - /// \brief Get the arguments of node. - /// \param[out] out_json JSON string of all attributes. - /// \return Status of the function. - Status to_json(nlohmann::json *out_json) override; - - /// \brief USPS by itself is a non-mappable dataset that does not support sampling. - /// However, if a cache operator is injected at some other place higher in the tree, that cache can - /// inherit this sampler from the leaf, providing sampling support from the caching layer. - /// That is why we setup the sampler for a leaf node that does not use sampling. - /// Note: This function is common among NonMappableSourceNode and should be promoted to its parent class. - /// \param[in] sampler The sampler to setup. - /// \return Status of the function. - Status SetupSamplerForCache(std::shared_ptr *sampler) override; - - /// \brief If a cache has been added into the ascendant tree over this USPS node, then the cache will be executing - /// a sampler for fetching the data. As such, any options in the USPS node need to be reset to its defaults - /// so that this USPS node will produce the full set of data into the cache. - /// Note: This function is common among NonMappableSourceNode and should be promoted to its parent class. - /// \return Status of the function. - Status MakeSimpleProducer() override; - - private: - std::string dataset_dir_; - std::string usage_; - int32_t num_samples_; - ShuffleMode shuffle_; - int32_t num_shards_; - int32_t shard_id_; -}; -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_USPS_NODE_H_ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_USPS_NODE_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_USPS_NODE_H_ + +#include +#include +#include + +#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" + +namespace mindspore::lite { +namespace dataset { +class USPSNode : public NonMappableSourceNode { + public: + /// \brief Constructor. + USPSNode(const std::string &dataset_dir, const std::string &usage, int32_t num_samples, ShuffleMode shuffle, + int32_t num_shards, int32_t shard_id, std::shared_ptr cache); + + /// \brief Destructor. + ~USPSNode() override = default; + + /// \brief Node name getter. + /// \return Name of the current node. + std::string Name() const override { return kUSPSNode; } + + /// \brief Print the description. + /// \param out - The output stream to write output to. 
+ void Print(std::ostream &out) const override; + + /// \brief Copy the node to a new object. + /// \return A shared pointer to the new copy. + std::shared_ptr Copy() override; + + /// \brief a base class override function to create the required runtime dataset op objects for this class. + /// \param node_ops - A vector containing shared pointer to the Dataset Ops that this object will create. + /// \return Status Status::OK() if build successfully. + Status Build(std::vector> *const node_ops) override; + + /// \brief Parameters validation. + /// \return Status Status::OK() if all the parameters are valid. + Status ValidateParams() override; + + /// \brief Get the shard id of node. + /// \return Status Status::OK() if get shard id successfully. + Status GetShardId(int32_t *shard_id) override; + + /// \brief Base-class override for GetDatasetSize. + /// \param[in] size_getter Shared pointer to DatasetSizeGetter. + /// \param[in] estimate This is only supported by some of the ops and it's used to speed up the process of getting + /// dataset size at the expense of accuracy. + /// \param[out] dataset_size the size of the dataset. + /// \return Status of the function. + Status GetDatasetSize(const std::shared_ptr &size_getter, bool estimate, + int64_t *dataset_size) override; + + /// \brief Getter functions. + const std::string &DatasetDir() const { return dataset_dir_; } + + /// \brief Getter functions. + const std::string &Usage() const { return usage_; } + + /// \brief Getter functions. + int32_t NumSamples() const { return num_samples_; } + + /// \brief Getter functions. + int32_t NumShards() const { return num_shards_; } + + /// \brief Getter functions. + int32_t ShardId() const { return shard_id_; } + + /// \brief Getter functions. + ShuffleMode Shuffle() const { return shuffle_; } + + /// \brief Get the arguments of node. + /// \param[out] out_json JSON string of all attributes. + /// \return Status of the function. + Status to_json(nlohmann::json *out_json) override; + + /// \brief USPS by itself is a non-mappable dataset that does not support sampling. + /// However, if a cache operator is injected at some other place higher in the tree, that cache can + /// inherit this sampler from the leaf, providing sampling support from the caching layer. + /// That is why we setup the sampler for a leaf node that does not use sampling. + /// Note: This function is common among NonMappableSourceNode and should be promoted to its parent class. + /// \param[in] sampler The sampler to setup. + /// \return Status of the function. + Status SetupSamplerForCache(std::shared_ptr *sampler) override; + + /// \brief If a cache has been added into the ascendant tree over this USPS node, then the cache will be executing + /// a sampler for fetching the data. As such, any options in the USPS node need to be reset to its defaults + /// so that this USPS node will produce the full set of data into the cache. + /// Note: This function is common among NonMappableSourceNode and should be promoted to its parent class. + /// \return Status of the function. 
+ Status MakeSimpleProducer() override; + + private: + std::string dataset_dir_; + std::string usage_; + int32_t num_samples_; + ShuffleMode shuffle_; + int32_t num_shards_; + int32_t shard_id_; +}; +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_USPS_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/voc_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/voc_node.cc index 241cdccb..ee0809c6 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/voc_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/voc_node.cc @@ -28,7 +28,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { #ifdef ENABLE_PYTHON @@ -246,4 +246,4 @@ Status VOCNode::from_json(nlohmann::json json_obj, std::shared_ptr } #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/voc_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/voc_node.h index b911700e..0a4cf699 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/voc_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/voc_node.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class VOCNode : public MappableSourceNode { public: @@ -124,5 +124,5 @@ class VOCNode : public MappableSourceNode { #endif }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_VOC_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wider_face_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wider_face_node.cc index 4ec0d221..d7f8b173 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wider_face_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wider_face_node.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/wider_face_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for WIDERFaceNode. 
WIDERFaceNode::WIDERFaceNode(const std::string &dataset_dir, const std::string &usage, const bool &decode, @@ -130,4 +130,4 @@ Status WIDERFaceNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wider_face_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wider_face_node.h index 97e1d527..385c7cae 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wider_face_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wider_face_node.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class WIDERFaceNode : public MappableSourceNode { public: @@ -99,5 +99,5 @@ class WIDERFaceNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_WIDER_FACE_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wiki_text_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wiki_text_node.cc index fc352855..912ac315 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wiki_text_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wiki_text_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/wiki_text_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for WikiTextNode. WikiTextNode::WikiTextNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, @@ -194,4 +194,4 @@ std::vector WikiTextNode::WalkAllFiles(const std::string &usage, co return wikitext_files_list; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wiki_text_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wiki_text_node.h index 4688ab52..3865060a 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wiki_text_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/wiki_text_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief class WikiTextNode. /// \brief Dataset derived class to represent WikiText dataset. 
@@ -135,5 +135,5 @@ class WikiTextNode : public NonMappableSourceNode { std::vector wikitext_files_list_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_WIKI_TEXT_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yahoo_answers_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yahoo_answers_node.cc index 2aa70e6e..93a8c331 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yahoo_answers_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yahoo_answers_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/util/path.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { YahooAnswersNode::YahooAnswersNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, ShuffleMode shuffle, int32_t num_shards, int32_t shard_id, @@ -213,4 +213,4 @@ Status YahooAnswersNode::MakeSimpleProducer() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yahoo_answers_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yahoo_answers_node.h index e25f59fc..be37fee7 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yahoo_answers_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yahoo_answers_node.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/yahoo_answers_op.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class YahooAnswersNode : public NonMappableSourceNode { public: @@ -116,5 +116,5 @@ class YahooAnswersNode : public NonMappableSourceNode { int32_t shard_id_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_YAHOO_ANSWERS_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yelp_review_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yelp_review_node.cc index 7414e805..62a93aa3 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yelp_review_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yelp_review_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/util/path.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { YelpReviewNode::YelpReviewNode(const std::string &dataset_dir, const std::string &usage, int64_t num_samples, ShuffleMode shuffle, int32_t num_shards, int32_t shard_id, @@ -187,4 +187,4 @@ std::vector YelpReviewNode::WalkAllFiles(const std::string &usage, return yelp_review_files_list; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yelp_review_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yelp_review_node.h index cff0e898..0030a511 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yelp_review_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yelp_review_node.h @@ -24,7 +24,7 @@ #include 
"mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/source/yelp_review_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class YelpReviewNode /// \brief A Node derived class to represent YelpReview Node. @@ -131,5 +131,5 @@ class YelpReviewNode : public NonMappableSourceNode { std::vector yelp_review_files_list_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_YELP_REVIEW_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yes_no_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yes_no_node.cc index 697792bd..61dbe7dc 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yes_no_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yes_no_node.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/source/yes_no_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for YesNoNode. YesNoNode::YesNoNode(const std::string &dataset_dir, std::shared_ptr sampler, @@ -113,4 +113,4 @@ Status YesNoNode::to_json(nlohmann::json *out_json) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yes_no_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yes_no_node.h index 970b6234..6a46c28e 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yes_no_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/source/yes_no_node.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class YesNoNode : public MappableSourceNode { public: @@ -88,5 +88,5 @@ class YesNoNode : public MappableSourceNode { std::shared_ptr sampler_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SOURCE_YES_NO_NODE_H diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/sync_wait_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/sync_wait_node.cc index 71240808..ef646870 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/sync_wait_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/sync_wait_node.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/engine/datasetops/barrier_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for SyncWaitNode SyncWaitNode::SyncWaitNode(std::shared_ptr child, const std::string &condition_name, py::function callback) @@ -54,4 +54,4 @@ Status SyncWaitNode::ValidateParams() { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/sync_wait_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/sync_wait_node.h index fa2be5d6..a1301c05 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/sync_wait_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/sync_wait_node.h @@ -23,7 +23,7 @@ #include 
"mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class SyncWaitNode /// \brief A Dataset derived class to represent SyncWaitNode dataset @@ -65,5 +65,5 @@ class SyncWaitNode : public DatasetNode { py::function callback_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_SYNC_WAIT_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/take_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/take_node.cc index 9bda5b9b..ccde3ef8 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/take_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/take_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor for TakeNode TakeNode::TakeNode(std::shared_ptr child, int32_t count) : take_count_(count) { this->AddChild(child); } @@ -95,4 +95,4 @@ Status TakeNode::from_json(nlohmann::json json_obj, std::shared_ptr return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/take_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/take_node.h index e7686fd4..7f2ee77f 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/take_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/take_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class TakeNode : public DatasetNode { public: @@ -99,5 +99,5 @@ class TakeNode : public DatasetNode { int32_t take_count_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_TAKE_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/zip_node.cc b/mindspore-lite/minddata/dataset/engine/ir/datasetops/zip_node.cc index 80b19d9f..50f59209 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/zip_node.cc +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/zip_node.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { ZipNode::ZipNode(const std::vector> &datasets) { @@ -100,4 +100,4 @@ Status ZipNode::from_json(std::vector> datasets, st return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/ir/datasetops/zip_node.h b/mindspore-lite/minddata/dataset/engine/ir/datasetops/zip_node.h index d7b03f0c..c885331c 100644 --- a/mindspore-lite/minddata/dataset/engine/ir/datasetops/zip_node.h +++ b/mindspore-lite/minddata/dataset/engine/ir/datasetops/zip_node.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ZipNode : public DatasetNode { public: @@ -85,5 +85,5 @@ class ZipNode : public DatasetNode { std::vector> datasets_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite 
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_IR_DATASETOPS_ZIP_NODE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/jagged_connector.h b/mindspore-lite/minddata/dataset/engine/jagged_connector.h index a6af336a..53bf54f7 100644 --- a/mindspore-lite/minddata/dataset/engine/jagged_connector.h +++ b/mindspore-lite/minddata/dataset/engine/jagged_connector.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/include/dataset/constants.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class JaggedConnector : public Connector { public: @@ -85,5 +85,5 @@ class JaggedConnector : public Connector { std::vector is_queue_finished_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_JAGGED_CONNECTOR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/operator_connector.h b/mindspore-lite/minddata/dataset/engine/operator_connector.h index 70400c07..292d966a 100644 --- a/mindspore-lite/minddata/dataset/engine/operator_connector.h +++ b/mindspore-lite/minddata/dataset/engine/operator_connector.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/constants.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class OperatorConnector : public Queue { @@ -62,6 +62,6 @@ class OperatorConnector : public Queue { int64_t out_rows_count_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_OPERATOR_CONNECTOR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/opt/pass.cc b/mindspore-lite/minddata/dataset/engine/opt/pass.cc index 06a16d68..9088cd1b 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pass.cc +++ b/mindspore-lite/minddata/dataset/engine/opt/pass.cc @@ -51,7 +51,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/data_queue_node.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/zip_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Driver method for TreePass @@ -293,4 +293,4 @@ Status IRNodePass::Visit(std::shared_ptr node, bool *cons return Visit(std::static_pointer_cast(node), modified); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/opt/pass.h b/mindspore-lite/minddata/dataset/engine/opt/pass.h index 587a50bf..e1215cb7 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pass.h +++ b/mindspore-lite/minddata/dataset/engine/opt/pass.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Non-leaf IR node class BatchNode; @@ -232,6 +232,6 @@ class IRNodePass : public IRPass { Order traversalOrder_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_OPT_PASS_H_ diff --git a/mindspore-lite/minddata/dataset/engine/opt/post/auto_worker_pass.cc b/mindspore-lite/minddata/dataset/engine/opt/post/auto_worker_pass.cc index a3417111..c3ee1f3d 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/post/auto_worker_pass.cc +++ b/mindspore-lite/minddata/dataset/engine/opt/post/auto_worker_pass.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" 
#include "mindspore-lite/minddata/dataset/engine/ir/datasetops/map_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // this will become the RootNode:DatasetNode when it is turned on @@ -136,4 +136,4 @@ float AutoWorkerPass::OpWeightPass::GetNodeWeightFromProfile(std::shared_ptr &node) @@ -153,4 +153,4 @@ Status AddSkipPass::RunOnTree(std::shared_ptr root_ir, bool *const return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/add_skip_pass.h b/mindspore-lite/minddata/dataset/engine/opt/pre/add_skip_pass.h index 4b00b887..3bcd7671 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/add_skip_pass.h +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/add_skip_pass.h @@ -20,7 +20,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DatasetOp; @@ -116,6 +116,6 @@ class AddSkipPass : public IRTreePass { Status RunOnTree(std::shared_ptr root_ir, bool *const modified) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_OPT_PRE_ADD_SKIP_PASS_H_ diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/cache_validation_pass.cc b/mindspore-lite/minddata/dataset/engine/opt/pre/cache_validation_pass.cc index c85a3fc8..a9ca4076 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/cache_validation_pass.cc +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/cache_validation_pass.cc @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/zip_node.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Constructor @@ -186,4 +186,4 @@ Status CacheValidationPass::VisitAfter(std::shared_ptr node, bool * return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/cache_validation_pass.h b/mindspore-lite/minddata/dataset/engine/opt/pre/cache_validation_pass.h index 7964c7fa..33656cee 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/cache_validation_pass.h +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/cache_validation_pass.h @@ -22,7 +22,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class CacheValidationPass cache_validation_pass.h @@ -106,6 +106,6 @@ class CacheValidationPass : public IRNodePass { bool is_mappable_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_OPT_PRE_CACHE_VALIDATION_PASS_ diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/debug_mode_pass.cc b/mindspore-lite/minddata/dataset/engine/opt/pre/debug_mode_pass.cc index c2585273..7a972ab1 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/debug_mode_pass.cc +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/debug_mode_pass.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/shuffle_node.h" #include "mindspore-lite/minddata/dataset/include/dataset/datasets.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr uint32_t kSeedValue = 1; bool DebugModePass::DebugPass::RemoveCache(std::shared_ptr node) 
const { @@ -97,4 +97,4 @@ Status DebugModePass::RunOnTree(std::shared_ptr root_ir, bool *cons return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/debug_mode_pass.h b/mindspore-lite/minddata/dataset/engine/opt/pre/debug_mode_pass.h index 67f4e063..63181a2f 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/debug_mode_pass.h +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/debug_mode_pass.h @@ -20,7 +20,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class DebugModePass /// \brief This is a pre parse pass that disable some nodes and prepares for the debug mode. @@ -72,6 +72,6 @@ class DebugModePass : public IRTreePass { }; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // DATASET_ENGINE_OPT_PRE_DEBUG_MODE_PASS_H_ diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/deep_copy_pass.cc b/mindspore-lite/minddata/dataset/engine/opt/pre/deep_copy_pass.cc index 9fc4da9f..bfdfa6c8 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/deep_copy_pass.cc +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/deep_copy_pass.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/root_node.h" #include "mindspore-lite/minddata/dataset/include/dataset/datasets.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { DeepCopyPass::DeepCopyPass() { @@ -69,4 +69,4 @@ Status DeepCopyPass::VisitAfter(std::shared_ptr node, bool *const m return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/deep_copy_pass.h b/mindspore-lite/minddata/dataset/engine/opt/pre/deep_copy_pass.h index 1a027fa6..4a4d3d43 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/deep_copy_pass.h +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/deep_copy_pass.h @@ -21,7 +21,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class DeepCopyPass @@ -56,6 +56,6 @@ class DeepCopyPass : public IRNodePass { DatasetNode *parent_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // DATASET_ENGINE_OPT_PRE_DEEP_COPY_PASS_H_ diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc b/mindspore-lite/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc index 09931034..277f797f 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/root_node.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/data_queue_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // constructor @@ -94,4 +94,4 @@ Status EpochCtrlPass::RunOnTree(std::shared_ptr root_ir, bool *cons return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.h b/mindspore-lite/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.h index 0f857143..dcf6c91f 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.h +++ 
b/mindspore-lite/minddata/dataset/engine/opt/pre/epoch_ctrl_pass.h @@ -21,7 +21,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DatasetOp; @@ -93,6 +93,6 @@ class EpochCtrlPass : public IRTreePass { Status RunOnTree(std::shared_ptr root_ir, bool *const modified) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // DATASET_ENGINE_OPT_PASS_PRE_EPOCH_INJECTION_PASS_H_ diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/getter_pass.cc b/mindspore-lite/minddata/dataset/engine/opt/pre/getter_pass.cc index 8602fcc9..afd4dcc3 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/getter_pass.cc +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/getter_pass.cc @@ -16,7 +16,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pre/getter_pass.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/map_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status GetterPass::Visit(std::shared_ptr node, bool *const modified) { @@ -24,4 +24,4 @@ Status GetterPass::Visit(std::shared_ptr node, bool *const modified) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/getter_pass.h b/mindspore-lite/minddata/dataset/engine/opt/pre/getter_pass.h index b8f90ccc..9a3d24b5 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/getter_pass.h +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/getter_pass.h @@ -21,7 +21,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DatasetOp; @@ -39,6 +39,6 @@ class GetterPass : public IRNodePass { Status Visit(std::shared_ptr node, bool *const modified) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_OPT_PASS_PRE_GETTER_PASS_H_ diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/input_validation_pass.cc b/mindspore-lite/minddata/dataset/engine/opt/pre/input_validation_pass.cc index 7183b112..958e8155 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/input_validation_pass.cc +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/input_validation_pass.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/datasets.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status InputValidationPass::Visit(std::shared_ptr node, bool *const modified) { @@ -42,4 +42,4 @@ Status InputValidationPass::Visit(std::shared_ptr node, bool *const return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/input_validation_pass.h b/mindspore-lite/minddata/dataset/engine/opt/pre/input_validation_pass.h index 25d0febb..6eb01776 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/input_validation_pass.h +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/input_validation_pass.h @@ -21,7 +21,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class InputValidationPass @@ -35,6 +35,6 @@ class InputValidationPass : public IRNodePass { Status Visit(std::shared_ptr node, bool *const modified) override; }; } // 
namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // DATASET_ENGINE_OPT_PRE_INPUT_VALIDATION_PASS_H_ diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/insert_map_pass.cc b/mindspore-lite/minddata/dataset/engine/opt/pre/insert_map_pass.cc index 834f8ea2..26d52fea 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/insert_map_pass.cc +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/insert_map_pass.cc @@ -25,7 +25,7 @@ #endif #include "mindspore-lite/minddata/dataset/kernels/ir/data/transforms_ir.h" -namespace mindspore::dataset { +namespace mindspore::lite::dataset { #ifndef ENABLE_ANDROID Status InsertMapPass::Visit(std::shared_ptr node, bool *const modified) { RETURN_UNEXPECTED_IF_NULL(node); @@ -77,4 +77,4 @@ Status InsertMapPass::Visit(std::shared_ptr node, bool *const modi return Status ::OK(); } #endif -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/insert_map_pass.h b/mindspore-lite/minddata/dataset/engine/opt/pre/insert_map_pass.h index f2d89594..cf08e365 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/insert_map_pass.h +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/insert_map_pass.h @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class InsertMapPass : public IRNodePass { public: @@ -40,5 +40,5 @@ class InsertMapPass : public IRNodePass { #endif }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_OPT_PRE_INSERT_MAP_PASS_H_ diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/node_removal_pass.cc b/mindspore-lite/minddata/dataset/engine/opt/pre/node_removal_pass.cc index 29ae2677..306a8e03 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/node_removal_pass.cc +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/node_removal_pass.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/skip_node.h" #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/take_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { NodeRemovalPass::RemovalNodes::RemovalNodes() {} @@ -74,4 +74,4 @@ Status NodeRemovalPass::RunOnTree(std::shared_ptr root_ir, bool *co return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/node_removal_pass.h b/mindspore-lite/minddata/dataset/engine/opt/pre/node_removal_pass.h index d5598aa5..e65298c3 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/node_removal_pass.h +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/node_removal_pass.h @@ -21,7 +21,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DatasetOp; @@ -81,6 +81,6 @@ class NodeRemovalPass : public IRTreePass { Status RunOnTree(std::shared_ptr root_ir, bool *const modified) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_OPT_PRE_NODE_REMOVAL_PASS_H_ diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/skip_pushdown_pass.cc b/mindspore-lite/minddata/dataset/engine/opt/pre/skip_pushdown_pass.cc index b40333ff..bbf6a66c 100644 --- 
a/mindspore-lite/minddata/dataset/engine/opt/pre/skip_pushdown_pass.cc +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/skip_pushdown_pass.cc @@ -30,7 +30,7 @@ #endif #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/source/samplers/skip_first_epoch_sampler_ir.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { SkipPushdownPass::SkipNodes::SkipNodes() : skip_count_(0), skip_steps_(0) {} @@ -182,4 +182,4 @@ Status SkipPushdownPass::RunOnTree(std::shared_ptr root_ir, bool *c return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/opt/pre/skip_pushdown_pass.h b/mindspore-lite/minddata/dataset/engine/opt/pre/skip_pushdown_pass.h index feca21d2..b8ef652e 100644 --- a/mindspore-lite/minddata/dataset/engine/opt/pre/skip_pushdown_pass.h +++ b/mindspore-lite/minddata/dataset/engine/opt/pre/skip_pushdown_pass.h @@ -22,7 +22,7 @@ #include #include "mindspore-lite/minddata/dataset/engine/opt/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class BatchNode; class DatasetNode; @@ -172,6 +172,6 @@ class SkipPushdownPass : public IRTreePass { Status RunOnTree(std::shared_ptr root_ir, bool *const modified) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_OPT_PRE_SKIP_PUSHDOWN_PASS_H_ diff --git a/mindspore-lite/minddata/dataset/engine/perf/auto_tune.cc b/mindspore-lite/minddata/dataset/engine/perf/auto_tune.cc index ef9a7ccb..398f0319 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/auto_tune.cc +++ b/mindspore-lite/minddata/dataset/engine/perf/auto_tune.cc @@ -30,7 +30,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/task_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { AutoTune::AutoTune(TreeAdapter *tree_adap, ProfilingManager *profiling_mgr) : tree_adapter_(tree_adap), @@ -754,4 +754,4 @@ Status AutoTune::AnalyseMemory() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/perf/auto_tune.h b/mindspore-lite/minddata/dataset/engine/perf/auto_tune.h index 84e28ad3..4bc75bc3 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/auto_tune.h +++ b/mindspore-lite/minddata/dataset/engine/perf/auto_tune.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/engine/tree_modifier.h" #include "mindspore-lite/minddata/dataset/engine/perf/profiling.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class TreeModifier; class AutoTune { @@ -302,5 +302,5 @@ class AutoTune { nlohmann::json autotune_config_json_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_PERF_AUTO_TUNE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/perf/connector_size.cc b/mindspore-lite/minddata/dataset/engine/perf/connector_size.cc index d9a16675..83f78b80 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/connector_size.cc +++ b/mindspore-lite/minddata/dataset/engine/perf/connector_size.cc @@ -32,7 +32,7 @@ namespace platform = mindspore::lite; #endif using json = nlohmann::json; -namespace mindspore { +namespace mindspore::lite { namespace dataset { using Qrow = std::vector; @@ -167,4 +167,4 @@ Path ConnectorSize::GetFileName(const std::string &dir_path, const std::string & return Path(dir_path) / 
Path("pipeline_profiling_" + rank_id + ".json"); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/perf/connector_size.h b/mindspore-lite/minddata/dataset/engine/perf/connector_size.h index b8e71e97..0e19e539 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/connector_size.h +++ b/mindspore-lite/minddata/dataset/engine/perf/connector_size.h @@ -24,7 +24,7 @@ using json = nlohmann::json; -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ExecutionTree; @@ -82,6 +82,6 @@ class ConnectorSize : public Sampling { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CONNECTOR_SIZE_H diff --git a/mindspore-lite/minddata/dataset/engine/perf/cpu_sampler.cc b/mindspore-lite/minddata/dataset/engine/perf/cpu_sampler.cc index dee5e6c1..7cd6800d 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/cpu_sampler.cc +++ b/mindspore-lite/minddata/dataset/engine/perf/cpu_sampler.cc @@ -32,7 +32,7 @@ #include "mindspore-lite/minddata/dataset/util/path.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { using json = nlohmann::json; #if !defined(_WIN32) && !defined(_WIN64) && !defined(__ANDROID__) && !defined(ANDROID) && !defined(__APPLE__) @@ -779,4 +779,4 @@ Status CpuSampler::GetSystemMemoryInfo(SystemMemoryMetric metric, uint64_t start return (sys_info_.GetSystemMemInfo(metric, start_index, end_index, result)); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/perf/cpu_sampler.h b/mindspore-lite/minddata/dataset/engine/perf/cpu_sampler.h index 21f53388..70c76a75 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/cpu_sampler.h +++ b/mindspore-lite/minddata/dataset/engine/perf/cpu_sampler.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/engine/perf/profiling.h" #include "mindspore-lite/minddata/dataset/engine/datasetops/dataset_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ExecutionTree; @@ -212,5 +212,5 @@ class CpuSampler : public Sampling { Path GetFileName(const std::string &dir_path, const std::string &rank_id) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_PERF_CPU_SAMPLER_H_ diff --git a/mindspore-lite/minddata/dataset/engine/perf/cyclic_array.h b/mindspore-lite/minddata/dataset/engine/perf/cyclic_array.h index 42631683..2b5f64e8 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/cyclic_array.h +++ b/mindspore-lite/minddata/dataset/engine/perf/cyclic_array.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/constants.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class CyclicArray "include/cyclic_array.h /// \brief This is a container with a contiguous memory layout that pnly keeps N last entries, @@ -193,5 +193,5 @@ class CyclicArray { dsize_t capacity_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_CYCLIC_ARRAY_H diff --git a/mindspore-lite/minddata/dataset/engine/perf/dataset_iterator_tracing.cc b/mindspore-lite/minddata/dataset/engine/perf/dataset_iterator_tracing.cc index 52e9e966..ad25c5eb 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/dataset_iterator_tracing.cc 
+++ b/mindspore-lite/minddata/dataset/engine/perf/dataset_iterator_tracing.cc @@ -18,10 +18,10 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/util/path.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Path DatasetIteratorTracing::GetFileName(const std::string &dir_path, const std::string &rank_id) { return Path(dir_path) / Path("dataset_iterator_profiling_" + rank_id + ".txt"); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/perf/dataset_iterator_tracing.h b/mindspore-lite/minddata/dataset/engine/perf/dataset_iterator_tracing.h index 5e179fc6..a1cc364b 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/dataset_iterator_tracing.h +++ b/mindspore-lite/minddata/dataset/engine/perf/dataset_iterator_tracing.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/perf/profiling.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DatasetIteratorTracing : public Tracing { public: @@ -38,6 +38,6 @@ class DatasetIteratorTracing : public Tracing { Path GetFileName(const std::string &dir_path, const std::string &rank_id) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_DATASET_ITERATOR_TRACING_H diff --git a/mindspore-lite/minddata/dataset/engine/perf/device_queue_tracing.cc b/mindspore-lite/minddata/dataset/engine/perf/device_queue_tracing.cc index 3e2ea183..936f96e0 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/device_queue_tracing.cc +++ b/mindspore-lite/minddata/dataset/engine/perf/device_queue_tracing.cc @@ -19,10 +19,10 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/util/path.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Path DeviceQueueTracing::GetFileName(const std::string &dir_path, const std::string &rank_id) { return Path(dir_path) / Path("device_queue_profiling_" + rank_id + ".txt"); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/perf/device_queue_tracing.h b/mindspore-lite/minddata/dataset/engine/perf/device_queue_tracing.h index fb54c1ac..6741fe11 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/device_queue_tracing.h +++ b/mindspore-lite/minddata/dataset/engine/perf/device_queue_tracing.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/engine/perf/profiling.h" #include "mindspore-lite/minddata/dataset/util/path.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DeviceQueueTracing : public Tracing { public: @@ -38,6 +38,6 @@ class DeviceQueueTracing : public Tracing { Path GetFileName(const std::string &dir_path, const std::string &rank_id) override; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_DEVICE_QUEUE_TRACING_H diff --git a/mindspore-lite/minddata/dataset/engine/perf/info_collector.cc b/mindspore-lite/minddata/dataset/engine/perf/info_collector.cc index 3809c3b9..0fa336ee 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/info_collector.cc +++ b/mindspore-lite/minddata/dataset/engine/perf/info_collector.cc @@ -17,7 +17,7 @@ #include "mindspore-lite/minddata/dataset/engine/perf/info_collector.h" #include "mindspore/mindspore/ccsrc/tools/profiler/profiling.h" -namespace mindspore::dataset { +namespace 
mindspore::lite::dataset { uint64_t GetSyscnt() { uint64_t time_cnt = 0; @@ -50,4 +50,4 @@ Status CollectOpInfo(const std::string &event, const std::string &stage, const u #endif return Status::OK(); } -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset diff --git a/mindspore-lite/minddata/dataset/engine/perf/info_collector.h b/mindspore-lite/minddata/dataset/engine/perf/info_collector.h index f814b2ce..e94ffe6f 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/info_collector.h +++ b/mindspore-lite/minddata/dataset/engine/perf/info_collector.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore::dataset { +namespace mindspore::lite::dataset { enum InfoLevel : uint8_t { kDeveloper = 0, kUser = 1 }; enum InfoType : uint8_t { kAll = 0, kMemory = 1, kTime = 2 }; enum TimeType : uint8_t { kStart = 0, kEnd = 1, kStamp = 2 }; @@ -36,5 +36,5 @@ Status CollectPipelineInfo(const std::string &event, const std::string &stage, c Status CollectOpInfo(const std::string &event, const std::string &stage, const uint64_t &start_time, const std::map &custom_info = {}); -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_PERF_INFO_COLLECTOR_H_ diff --git a/mindspore-lite/minddata/dataset/engine/perf/monitor.cc b/mindspore-lite/minddata/dataset/engine/perf/monitor.cc index 1722630c..d46451a8 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/monitor.cc +++ b/mindspore-lite/minddata/dataset/engine/perf/monitor.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/engine/execution_tree.h" #include "mindspore-lite/minddata/dataset/engine/consumers/tree_consumer.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Monitor::Monitor(ProfilingManager *profiling_manager) : Monitor(profiling_manager, GlobalContext::config_manager()) {} @@ -64,4 +64,4 @@ Status Monitor::operator()() { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/perf/monitor.h b/mindspore-lite/minddata/dataset/engine/perf/monitor.h index 95022d89..406a8a2a 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/monitor.h +++ b/mindspore-lite/minddata/dataset/engine/perf/monitor.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/cond_var.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ExecutionTree; class ProfilingManager; @@ -56,6 +56,6 @@ class Monitor { CondVar cv_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_MONITOR_H diff --git a/mindspore-lite/minddata/dataset/engine/perf/perf_data.h b/mindspore-lite/minddata/dataset/engine/perf/perf_data.h index 480ee7a1..28cf913a 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/perf_data.h +++ b/mindspore-lite/minddata/dataset/engine/perf/perf_data.h @@ -20,7 +20,7 @@ #include #include "mindspore-lite/minddata/dataset/include/dataset/constants.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // PerfData is a convenience class to record and store the data produced by Monitor @@ -88,5 +88,5 @@ class PerfData { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_PERF_DATA_H diff --git a/mindspore-lite/minddata/dataset/engine/perf/profiling.cc 
b/mindspore-lite/minddata/dataset/engine/perf/profiling.cc index b82d1e0a..74828bce 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/profiling.cc +++ b/mindspore-lite/minddata/dataset/engine/perf/profiling.cc @@ -40,7 +40,7 @@ namespace platform = mindspore; namespace platform = mindspore::lite; #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr int32_t PUSH_TIME_OFFSET = 0; constexpr int32_t BATCH_TIME_OFFSET = 1; @@ -873,4 +873,4 @@ uint64_t ProfilingTime::GetCurMilliSecond() { return static_cast(duration_cast(steady_clock::now().time_since_epoch()).count()); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/perf/profiling.h b/mindspore-lite/minddata/dataset/engine/perf/profiling.h index de3c98bd..543a20c0 100644 --- a/mindspore-lite/minddata/dataset/engine/perf/profiling.h +++ b/mindspore-lite/minddata/dataset/engine/perf/profiling.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/engine/perf/monitor.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class Monitor; @@ -620,5 +620,5 @@ class ProfilingTime { static uint64_t GetCurMilliSecond(); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_PERF_PROFILING_H_ diff --git a/mindspore-lite/minddata/dataset/engine/python_runtime_context.cc b/mindspore-lite/minddata/dataset/engine/python_runtime_context.cc index 16acbc66..2d1dd750 100644 --- a/mindspore-lite/minddata/dataset/engine/python_runtime_context.cc +++ b/mindspore-lite/minddata/dataset/engine/python_runtime_context.cc @@ -17,7 +17,7 @@ #include "mindspore-lite/minddata/dataset/engine/python_runtime_context.h" #include "pybind11/pybind11.h" -namespace mindspore::dataset { +namespace mindspore::lite::dataset { Status PythonRuntimeContext::Terminate() { MS_LOG(INFO) << "Terminating a Dataset PythonRuntime."; if (tree_consumer_ != nullptr) { @@ -51,4 +51,4 @@ TreeConsumer *PythonRuntimeContext::GetPythonConsumer() { return dynamic_cast(tree_consumer_.get()); } } -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset diff --git a/mindspore-lite/minddata/dataset/engine/python_runtime_context.h b/mindspore-lite/minddata/dataset/engine/python_runtime_context.h index 7f5df619..23cd2f61 100644 --- a/mindspore-lite/minddata/dataset/engine/python_runtime_context.h +++ b/mindspore-lite/minddata/dataset/engine/python_runtime_context.h @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/engine/consumers/python_tree_consumer.h" #include "mindspore-lite/minddata/dataset/engine/runtime_context.h" -namespace mindspore::dataset { +namespace mindspore::lite::dataset { class NativeRuntimeContext; /// Class that represents Python single runtime instance which can consume data from a data pipeline @@ -42,5 +42,5 @@ class PythonRuntimeContext : public RuntimeContext { Status TerminateImpl(); }; -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_PYTHON_RUNTIME_CONTEXT_H_ diff --git a/mindspore-lite/minddata/dataset/engine/runtime_context.cc b/mindspore-lite/minddata/dataset/engine/runtime_context.cc index 3e9361de..ad5bccbb 100644 --- a/mindspore-lite/minddata/dataset/engine/runtime_context.cc +++ b/mindspore-lite/minddata/dataset/engine/runtime_context.cc @@ -17,7 +17,7 @@ #include 
"mindspore-lite/minddata/dataset/engine/runtime_context.h" #include -namespace mindspore::dataset { +namespace mindspore::lite::dataset { void RuntimeContext::AssignConsumer(std::shared_ptr tree_consumer) { tree_consumer_ = std::move(tree_consumer); } @@ -45,4 +45,4 @@ NativeRuntimeContext::~NativeRuntimeContext() { TreeConsumer *RuntimeContext::GetConsumer() { return tree_consumer_.get(); } Status RuntimeContext::Init() const { return GlobalInit(); } -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset diff --git a/mindspore-lite/minddata/dataset/engine/runtime_context.h b/mindspore-lite/minddata/dataset/engine/runtime_context.h index 7d5d7639..2a3cbe31 100644 --- a/mindspore-lite/minddata/dataset/engine/runtime_context.h +++ b/mindspore-lite/minddata/dataset/engine/runtime_context.h @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/core/client.h" #include "mindspore-lite/minddata/dataset/engine/consumers/tree_consumer.h" -namespace mindspore::dataset { +namespace mindspore::lite::dataset { class TreeConsumer; /// Class that represents single runtime instance which can consume data from a data pipeline class RuntimeContext { @@ -64,5 +64,5 @@ class NativeRuntimeContext : public RuntimeContext { Status TerminateImpl(); }; -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_RUNTIME_CONTEXT_H_ diff --git a/mindspore-lite/minddata/dataset/engine/serdes.cc b/mindspore-lite/minddata/dataset/engine/serdes.cc index c114549a..2ff52e38 100644 --- a/mindspore-lite/minddata/dataset/engine/serdes.cc +++ b/mindspore-lite/minddata/dataset/engine/serdes.cc @@ -28,7 +28,7 @@ #endif #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/acl_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { std::map *operation)> Serdes::func_ptr_ = Serdes::InitializeFuncPtr(); @@ -484,4 +484,4 @@ void ParseMindIRPreprocess_C(const std::vector &dataset_json, } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/serdes.h b/mindspore-lite/minddata/dataset/engine/serdes.h index 46b33d5d..f3e134fb 100644 --- a/mindspore-lite/minddata/dataset/engine/serdes.h +++ b/mindspore-lite/minddata/dataset/engine/serdes.h @@ -134,7 +134,7 @@ #include "mindspore-lite/minddata/dataset/text/ir/kernels/text_ir.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief The Serdes class is used to serialize an IR tree into JSON string and dump into file if file name /// specified. 
@@ -242,6 +242,6 @@ class Serdes { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_SERDES_H_ diff --git a/mindspore-lite/minddata/dataset/engine/tree_adapter.cc b/mindspore-lite/minddata/dataset/engine/tree_adapter.cc index a31b2e28..42eba813 100644 --- a/mindspore-lite/minddata/dataset/engine/tree_adapter.cc +++ b/mindspore-lite/minddata/dataset/engine/tree_adapter.cc @@ -59,7 +59,7 @@ #include "utils/ms_context.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { TreeAdapter::TreeAdapter(UsageFlag usage) : usage_(usage), @@ -838,4 +838,4 @@ Status TreeAdapter::Launch() { nlohmann::json TreeAdapter::GetOffloadJson() { return offload_json_; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/tree_adapter.h b/mindspore-lite/minddata/dataset/engine/tree_adapter.h index 2bb9da3b..12ab4b5e 100644 --- a/mindspore-lite/minddata/dataset/engine/tree_adapter.h +++ b/mindspore-lite/minddata/dataset/engine/tree_adapter.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/engine/perf/auto_tune.h" #include "mindspore-lite/minddata/dataset/engine/perf/dataset_iterator_tracing.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DatasetNode; class TreeModifier; @@ -186,6 +186,6 @@ class TreeAdapter { nlohmann::json offload_json_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_TREE_ADAPTER_H_ diff --git a/mindspore-lite/minddata/dataset/engine/tree_adapter_lite.cc b/mindspore-lite/minddata/dataset/engine/tree_adapter_lite.cc index b6328d2d..2ba986b4 100644 --- a/mindspore-lite/minddata/dataset/engine/tree_adapter_lite.cc +++ b/mindspore-lite/minddata/dataset/engine/tree_adapter_lite.cc @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/engine/opt/pre/insert_map_pass.h" #include "mindspore-lite/minddata/dataset/engine/opt/pre/node_removal_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { TreeAdapterLite::TreeAdapterLite(UsageFlag usage) : root_(nullptr), usage_(usage) { // Create ExecutionTree. 
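A few of the files above were already written in the compact single-declaration style rather than two nested blocks (info_collector.cc/.h, python_runtime_context.cc/.h, runtime_context.cc/.h, insert_map_pass.cc), so there the patch extends the declaration in place: namespace mindspore::dataset becomes namespace mindspore::lite::dataset. A minimal sketch of that variant, with RuntimeContext standing in for the classes declared there and its members elided (not taken from the patch):

// Before: compact two-level form.
namespace mindspore::dataset {
class RuntimeContext { /* members elided */ };
}  // namespace mindspore::dataset

// After: the same compact form, extended to three levels by this patch.
namespace mindspore::lite::dataset {
class RuntimeContext { /* members elided */ };
}  // namespace mindspore::lite::dataset

Since C++17, namespace A::B::C { } is equivalent to opening the three namespaces one block at a time, so the compact form and the two-block form used elsewhere in the patch declare into exactly the same mindspore::lite::dataset scope; the patch simply keeps whichever spelling each file already used.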
@@ -177,4 +177,4 @@ Status TreeAdapterLite::Compile(const std::shared_ptr &input_ir, in nlohmann::json TreeAdapterLite::GetOffloadJson() { return offload_json_; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/tree_adapter_lite.h b/mindspore-lite/minddata/dataset/engine/tree_adapter_lite.h index af5b61a6..0c0aa854 100644 --- a/mindspore-lite/minddata/dataset/engine/tree_adapter_lite.h +++ b/mindspore-lite/minddata/dataset/engine/tree_adapter_lite.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/engine/ir/datasetops/dataset_node.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class TensorRow; @@ -79,6 +79,6 @@ class TreeAdapterLite { }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_TREE_ADAPTER_LITE_H_ diff --git a/mindspore-lite/minddata/dataset/engine/tree_modifier.cc b/mindspore-lite/minddata/dataset/engine/tree_modifier.cc index 111df095..71837d47 100644 --- a/mindspore-lite/minddata/dataset/engine/tree_modifier.cc +++ b/mindspore-lite/minddata/dataset/engine/tree_modifier.cc @@ -16,7 +16,7 @@ #include "mindspore-lite/minddata/dataset/engine/tree_modifier.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status AutotuneCallback::DSNStepBegin(const CallbackParam &cb_param) { // check if the queue is empty, no need to wait until a change request is ready @@ -67,4 +67,4 @@ Status ChangeNumWorkersRequest::ApplyChange(DatasetOp *op) { TreeModifier::TreeModifier(const TreeAdapter *adapter) : TreeModifier(adapter->tree_.get()) {} } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/engine/tree_modifier.h b/mindspore-lite/minddata/dataset/engine/tree_modifier.h index 332505dc..bb7edf54 100644 --- a/mindspore-lite/minddata/dataset/engine/tree_modifier.h +++ b/mindspore-lite/minddata/dataset/engine/tree_modifier.h @@ -29,7 +29,7 @@ constexpr int64_t queue_size = 10; -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DatasetNode; @@ -158,6 +158,6 @@ class TreeModifier { uint64_t num_requests_ = 0; // counter for number of requests received }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_ENGINE_TREE_MODIFIER_H_ diff --git a/mindspore-lite/minddata/dataset/include/dataset/audio.h b/mindspore-lite/minddata/dataset/include/dataset/audio.h index ddf2315a..0b512854 100644 --- a/mindspore-lite/minddata/dataset/include/dataset/audio.h +++ b/mindspore-lite/minddata/dataset/include/dataset/audio.h @@ -1,1437 +1,1437 @@ -/** - * Copyright 2021-2024 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_AUDIO_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_AUDIO_H_ - -#include -#include -#include -#include -#include -#include - -#include "include/api/dual_abi_helper.h" -#include "include/api/status.h" -#include "include/api/visible.h" -#include "include/dataset/constants.h" -#include "include/dataset/transforms.h" - -namespace mindspore { -namespace dataset { -class TensorOperation; - -// Transform operations for performing computer audio. -namespace audio { -/// \brief Compute the angle of complex tensor input. -class DATASET_API Angle final : public TensorTransform { - public: - /// \brief Constructor. - Angle(); - - /// \brief Destructor. - ~Angle() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; -}; - -/// \brief Design two-pole allpass filter. Similar to SoX implementation. -class DATASET_API AllpassBiquad final : public TensorTransform { - public: - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. - /// \param[in] central_freq Central frequency (in Hz). - /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. - explicit AllpassBiquad(int32_t sample_rate, float central_freq, float Q = 0.707); - - /// \brief Destructor. - ~AllpassBiquad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief AmplitudeToDB TensorTransform. -/// \notes Turn a tensor from the power/amplitude scale to the decibel scale. -class DATASET_API AmplitudeToDB final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] stype Scale of input tensor, must be one of [ScaleType::kPower, ScaleType::kMagnitude]. - /// Default: ScaleType::kPower. - /// \param[in] ref_value Calculate db_multiplier. Default: 1.0. - /// \param[in] amin Minimum threshold for input tensor and ref_value. It must be greater than zero. Default: 1e-10. - /// \param[in] top_db Decibels cut-off value. It must be greater than or equal to zero. Default: 80.0. - explicit AmplitudeToDB(ScaleType stype = ScaleType::kPower, float ref_value = 1.0, float amin = 1e-10, - float top_db = 80.0); - - /// \brief Destructor. - ~AmplitudeToDB() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Design two-pole band filter. -class DATASET_API BandBiquad final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. - /// \param[in] central_freq Central frequency (in Hz). - /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. - /// \param[in] noise Choose alternate mode for un-pitched audio or mode oriented to pitched audio. Default: False. - explicit BandBiquad(int32_t sample_rate, float central_freq, float Q = 0.707, bool noise = false); - - /// \brief Destructor. 
- ~BandBiquad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Design two-pole band-pass filter. -class DATASET_API BandpassBiquad final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. - /// \param[in] central_freq Central frequency (in Hz). - /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. - /// \param[in] const_skirt_gain, If True, uses a constant skirt gain (peak gain = Q). If False, uses a - /// constant 0dB peak gain. Default: False. - explicit BandpassBiquad(int32_t sample_rate, float central_freq, float Q = 0.707, bool const_skirt_gain = false); - - /// \brief Destructor. - ~BandpassBiquad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Design two-pole band-reject filter. Similar to SoX implementation. -class DATASET_API BandrejectBiquad final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. - /// \param[in] central_freq Central frequency (in Hz). - /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. - explicit BandrejectBiquad(int32_t sample_rate, float central_freq, float Q = 0.707); - - /// \brief Destructor. - ~BandrejectBiquad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Design a bass tone-control effect. -class DATASET_API BassBiquad final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. - /// \param[in] gain Desired gain at the boost (or attenuation) in dB. - /// \param[in] central_freq Central frequency (in Hz). Default: 100.0. - /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. - explicit BassBiquad(int32_t sample_rate, float gain, float central_freq = 100.0, float Q = 0.707); - - /// \brief Destructor. - ~BassBiquad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Perform a biquad filter of input tensor. -class DATASET_API Biquad final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] b0 Numerator coefficient of current input, x[n]. - /// \param[in] b1 Numerator coefficient of input one time step ago x[n-1]. - /// \param[in] b2 Numerator coefficient of input two time steps ago x[n-2]. - /// \param[in] a0 Denominator coefficient of current output y[n], the value can't be zero, typically 1. 
- /// \param[in] a1 Denominator coefficient of current output y[n-1]. - /// \param[in] a2 Denominator coefficient of current output y[n-2]. - explicit Biquad(float b0, float b1, float b2, float a0, float a1, float a2); - - /// \brief Destructor. - ~Biquad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief ComplexNorm TensorTransform. -/// \notes Compute the norm of complex tensor input. -class DATASET_API ComplexNorm final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] power Power of the norm, which must be non-negative. Default: 1.0. - explicit ComplexNorm(float power = 1.0); - - /// \brief Destructor. - ~ComplexNorm() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief ComputeDeltas Transform. -/// \note Compute delta coefficients of a spectrogram. -class DATASET_API ComputeDeltas final : public TensorTransform { - public: - /// \brief Construct a new Compute Deltas object. - /// \f[ - /// d_{t}=\frac{{\textstyle\sum_{n=1}^{N}}n(c_{t+n}-c_{t-n})}{2{\textstyle\sum_{n=1}^{N}}n^{2}} - /// \f] - /// \param[in] win_length The window length used for computing delta, must be no less than 3. Default: 5. - /// \param[in] pad_mode Padding mode. Can be one of BorderType::kConstant, BorderType::kEdge, - /// BorderType::kReflect or BorderType::kSymmetric. Default: BorderType::kEdge. - explicit ComputeDeltas(int32_t win_length = 5, BorderType pad_mode = BorderType::kEdge); - - /// \brief Destructor. - ~ComputeDeltas() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Apply contrast effect. -class DATASET_API Contrast final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] enhancement_amount Controls the amount of the enhancement. Default: 75.0. - explicit Contrast(float enhancement_amount = 75.0); - - /// \brief Destructor. - ~Contrast() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Turn a waveform from the decibel scale to the power/amplitude scale. -class DATASET_API DBToAmplitude final : public TensorTransform { - public: - /// \brief Constructor - /// \param[in] ref Reference which the output will be scaled by. - /// \param[in] power If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude. - explicit DBToAmplitude(float ref, float power); - - /// \brief Destructor. - ~DBToAmplitude() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. 
- std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Apply a DC shift to the audio. -class DATASET_API DCShift : public TensorTransform { - public: - /// \brief Constructor - /// \param[in] shift Indicates the amount to shift the audio, the value must be in the range [-2.0, 2.0]. - /// \param[in] limiter_gain Used only on peaks to prevent clipping. - DCShift(float shift, float limiter_gain); - - /// \brief Constructor - /// \param[in] shift Indicates the amount to shift the audio. - /// \note This constructor will use `shift` as `limiter_gain`. - explicit DCShift(float shift); - - /// \brief Destructor. - ~DCShift() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \param[in] n_mfcc Number of mfc coefficients to retain, the value must be greater than 0. -/// \param[in] n_mels Number of mel filterbanks, the value must be greater than 0. -/// \param[in] norm Norm to use, can be NormMode::kNone or NormMode::kOrtho. -/// \return Status error code, returns OK if no error encountered. -Status CreateDct(mindspore::MSTensor *output, int32_t n_mfcc, int32_t n_mels, NormMode norm = NormMode::kNone); - -/// \brief Design two-pole deemph filter. Similar to SoX implementation. -class DATASET_API DeemphBiquad final : public TensorTransform { - public: - /// \param[in] sample_rate Sampling rate of the waveform, the value can only be 44100 (Hz) or 48000(hz). - explicit DeemphBiquad(int32_t sample_rate); - - /// \brief Destructor. - ~DeemphBiquad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Detect pitch frequency. -class DATASET_API DetectPitchFrequency final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. - /// \param[in] frame_time Duration of a frame, the value must be greater than zero. Default: 0.02. - /// \param[in] win_length The window length for median smoothing (in number of frames), the value must - /// be greater than zero. Default: 30. - /// \param[in] freq_low Lowest frequency that can be detected (Hz), the value must be greater than zero. Default: 85. - /// \param[in] freq_high Highest frequency that can be detected (Hz), the value must be greater than - /// zero. Default: 3400. - explicit DetectPitchFrequency(int32_t sample_rate, float frame_time = 0.01, int32_t win_length = 30, - int32_t freq_low = 85, int32_t freq_high = 3400); - - /// \brief Destructor. - ~DetectPitchFrequency() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Dither increases the perceived dynamic range of audio stored at a -/// particular bit-depth by eliminating nonlinear truncation distortion. -class DATASET_API Dither final : public TensorTransform { - public: - /// \brief Constructor. 
- /// \param[in] density_function The density function of a continuous random variable. - /// Can be one of DensityFunction::kTPDF (Triangular Probability Density Function), - /// DensityFunction::kRPDF (Rectangular Probability Density Function) or - /// DensityFunction::kGPDF (Gaussian Probability Density Function). Default: DensityFunction::kTPDF. - /// \param[in] noise_shaping A filtering process that shapes the spectral energy of - /// quantisation error. Default: false. - explicit Dither(DensityFunction density_function = DensityFunction::kTPDF, bool noise_shaping = false); - - /// \brief Destructor. - ~Dither() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief EqualizerBiquad TensorTransform. Apply highpass biquad filter on audio. -class DATASET_API EqualizerBiquad final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. - /// \param[in] center_freq Filter's central frequency (in Hz). - /// \param[in] gain Desired gain at the boost (or attenuation) in dB. - /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. - EqualizerBiquad(int32_t sample_rate, float center_freq, float gain, float Q = 0.707); - - /// \brief Destructor. - ~EqualizerBiquad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Add fade in or/and fade out on the input audio. -class DATASET_API Fade final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] fade_in_len Length of fade-in (time frames), which must be non-negative - /// and no more than the length of waveform. Default: 0. - /// \param[in] fade_out_len Length of fade-out (time frames), which must be non-negative - /// and no more than the length of waveform. Default: 0. - /// \param[in] fade_shape An enum for the fade shape. Default: FadeShape::kLinear. - explicit Fade(int32_t fade_in_len = 0, int32_t fade_out_len = 0, FadeShape fade_shape = FadeShape::kLinear); - - /// \brief Destructor. - ~Fade() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Design IIR forward and backward filter. -class DATASET_API Filtfilt final : public TensorTransform { - public: - /// \param[in] a_coeffs Numerator coefficients of difference equation of dimension of (n_order + 1). - /// Lower delays coefficients are first, e.g. [a0, a1, a2, ...]. - /// Must be same size as b_coeffs (pad with 0's as necessary). - /// \param[in] b_coeffs Numerator coefficients of difference equation of dimension of (n_order + 1). - /// Lower delays coefficients are first, e.g. [b0, b1, b2, ...]. - /// Must be same size as a_coeffs (pad with 0's as necessary). - /// \param[in] clamp If True, clamp the output signal to be in the range [-1, 1]. Default: True. 
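// Editorial sketch (not part of this patch): how the a_coeffs/b_coeffs vectors documented
// above map onto the usual filter difference equation
//   a0*y[n] + a1*y[n-1] + a2*y[n-2] = b0*x[n] + b1*x[n-1] + b2*x[n-2],
// lowest delay first and both vectors the same length. Float coefficient vectors, the
// include path and the namespace spelling are assumptions; the values below are arbitrary
// placeholders, not a recommended filter design.
#include <vector>
#include "include/dataset/audio.h"

void BuildFiltfilt() {
  namespace audio = mindspore::lite::dataset::audio;
  std::vector<float> a_coeffs = {1.0f, -0.6f, 0.2f};  // denominator (feedback), a0 first
  std::vector<float> b_coeffs = {0.1f, 0.2f, 0.1f};   // numerator (feed-forward)
  audio::Filtfilt filtfilt(a_coeffs, b_coeffs, /*clamp=*/true);
}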
- Filtfilt(const std::vector &a_coeffs, const std::vector &b_coeffs, bool clamp = true); - - /// \brief Destructor. - ~Filtfilt() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Apply a flanger effect to the audio. -class DATASET_API Flanger final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz). - /// \param[in] delay Desired delay in milliseconds (ms), range: [0, 30]. Default: 0.0. - /// \param[in] depth Desired delay depth in milliseconds (ms), range: [0, 10]. Default: 2.0. - /// \param[in] regen Desired regen (feedback gain) in dB., range: [-95, 95]. Default: 0.0. - /// \param[in] width Desired width (delay gain) in dB, range: [0, 100]. Default: 71.0. - /// \param[in] speed Modulation speed in Hz, range: [0.1, 10]. Default: 0.5. - /// \param[in] phase Percentage phase-shift for multi-channel, range: [0, 100]. Default: 25.0. - /// \param[in] modulation Modulation of input tensor, must be one of [Modulation::kSinusoidal, - /// Modulation::kTriangular]. Default:Modulation::kSinusoidal. - /// \param[in] interpolation Interpolation of input tensor, must be one of [Interpolation::kLinear, - /// Interpolation::kQuadratic]. Default:Interpolation::kLinear. - explicit Flanger(int32_t sample_rate, float delay = 0.0, float depth = 2.0, float regen = 0.0, float width = 71.0, - float speed = 0.5, float phase = 25.0, Modulation modulation = Modulation::kSinusoidal, - Interpolation interpolation = Interpolation::kLinear); - - /// \brief Destructor. - ~Flanger() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief FrequencyMasking TensorTransform. -/// \notes Apply masking to a spectrogram in the frequency domain. -class DATASET_API FrequencyMasking final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] iid_masks Whether to apply different masks to each example. - /// \param[in] frequency_mask_param Maximum possible length of the mask, range: [0, freq_length]. Default: 0. - /// Indices uniformly sampled from [0, frequency_mask_param]. - /// Mask width when iid_masks=true. - /// \param[in] mask_start Mask start when iid_masks=true, range: [0, freq_length-frequency_mask_param]. Default: 0. - /// \param[in] mask_value Mask value. - explicit FrequencyMasking(bool iid_masks = false, int32_t frequency_mask_param = 0, int32_t mask_start = 0, - float mask_value = 0.0); - - /// \brief Destructor. - ~FrequencyMasking() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Apply amplification or attenuation to the whole waveform. -class DATASET_API Gain final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] gain_db Gain adjustment in decibels (dB). Default: 1.0. - explicit Gain(float gain_db = 1.0); - - /// \brief Destructor. 
- ~Gain() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Waveform calculation from linear scalar amplitude spectrogram using GriffinLim transform. -class DATASET_API GriffinLim final : public TensorTransform { - public: - /// \brief Constructor. - /// \notes Calculated by formula: - /// x(n)=\frac{\sum_{m=-\infty}^{\infty} w(m S-n) y_{w}(m S, n)}{\sum_{m=-\infty}^{\infty} w^{2}(m S-n)} - /// where w represents the window function, y represents the reconstructed signal of each frame and x represents - /// the whole signal. - /// \param[in] n_fft Size of FFT. Default: 400. - /// \param[in] n_iter Number of iteration for phase recovery. Default: 32. - /// \param[in] win_length Window size for GriffinLim. Default: 0, will be set to n_fft. - /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will be set to win_length / 2. - /// \param[in] window_type Window type for GriffinLim. Default: WindowType::kHann. - /// \param[in] power Exponent for the magnitude spectrogram. Default: 2.0. - /// \param[in] momentum The momentum for fast Griffin-Lim. Default: 0.99. - /// \param[in] length Length of the expected output waveform. Default: 0.0, will be set to the value of last - /// dimension of the stft matrix. - /// \param[in] rand_init Flag for random phase initialization or all-zero phase initialization. Default: true. - explicit GriffinLim(int32_t n_fft = 400, int32_t n_iter = 32, int32_t win_length = 0, int32_t hop_length = 0, - WindowType window_type = WindowType::kHann, float power = 2.0, float momentum = 0.99, - int32_t length = 0, bool rand_init = true); - - /// \brief Destructor. - ~GriffinLim() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief HighpassBiquad TensorTransform. Apply highpass biquad filter on audio. -class DATASET_API HighpassBiquad final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. - /// \param[in] cutoff_freq Filter cutoff frequency (in Hz). - /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. - HighpassBiquad(int32_t sample_rate, float cutoff_freq, float Q = 0.707); - - /// \brief Destructor. - ~HighpassBiquad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief InverseMelScale TensorTransform -/// \notes Solve for a normal STFT from a mel frequency STFT, using a conversion matrix. -class DATASET_API InverseMelScale final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] n_stft Number of bins in STFT, must be positive. - /// \param[in] n_mels Number of mel filter, must be positive. Default: 128. - /// \param[in] sample_rate Sample rate of the signal, the value can't be zero. Default: 16000. - /// \param[in] f_min Minimum frequency, must be non-negative. 
Default: 0.0. - /// \param[in] f_max Maximum frequency, must be non-negative. Default: 0.0, will be set to sample_rate / 2. - /// \param[in] max_iter Maximum number of optimization iterations, must be positive. Default: 100000. - /// \param[in] tolerance_loss Value of loss to stop optimization at, must be non-negative. Default: 1e-5. - /// \param[in] tolerance_change Difference in losses to stop optimization at, must be non-negative. Default: 1e-8. - /// \param[in] sgdargs Parameters of SGD optimizer, including lr, momentum. - /// Default: {{"sgd_lr", 0.1}, {"sgd_momentum", 0.0}}. - /// \param[in] norm Type of norm, value should be NormType::kSlaney or NormType::kNone. If norm is NormType::kSlaney, - /// divide the triangle mel weight by the width of the mel band. Default: NormType::kNone. - /// \param[in] mel_type Type of mel, value should be MelType::kHtk or MelType::kSlaney. Default: MelType::kHtk. - explicit InverseMelScale(int32_t n_stft, int32_t n_mels = 128, int32_t sample_rate = 16000, float f_min = 0.0, - float f_max = 0.0, int32_t max_iter = 100000, float tolerance_loss = 1e-5, - float tolerance_change = 1e-8, - const std::map &sgdargs = {{"sgd_lr", 0.1}, {"sgd_momentum", 0.0}}, - NormType norm = NormType::kNone, MelType mel_type = MelType::kHtk); - - /// \brief Destructor. - ~InverseMelScale() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Create an inverse spectrogram to recover an audio signal from a spectrogram. -class DATASET_API InverseSpectrogram final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] length The output length of the waveform. Default: 0, means to output the whole waveform. - /// \param[in] n_fft Size of FFT, creates n_fft // 2 + 1 bins. Default: 400. - /// \param[in] win_length Window size. Default: 0, will be set to `n_fft` . - /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will be set to `win_length // 2` . - /// \param[in] pad Two sided padding of signal. Default: 0. - /// \param[in] window A function to create a window tensor that is applied/multiplied to each frame/window. - /// Default: WindowType::kHann. - /// \param[in] normalized Whether the spectrogram was normalized by magnitude after stft. Default:false. - /// \param[in] center Whether the signal in spectrogram was padded on both sides. Default: true. - /// \param[in] pad_mode Controls the padding method used when center is True. Default: BorderType::kReflect. - /// \param[in] onesided Controls whether spectrogram was used to return half of results to avoid - /// redundancy. Default: true. - explicit InverseSpectrogram(int32_t length = 0, int32_t n_fft = 400, int32_t win_length = 0, int32_t hop_length = 0, - int32_t pad = 0, WindowType window = WindowType::kHann, bool normalized = false, - bool center = true, BorderType pad_mode = BorderType::kReflect, bool onesided = true); - - /// \brief Destructor. - ~InverseSpectrogram() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Create LFCC for a raw audio signal. 
-class DATASET_API LFCC final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sample rate of audio signal. Default: 16000. - /// \param[in] n_filter Number of linear filters to apply. Default: 128. - /// \param[in] n_lfcc Number of lfc coefficients to retain. Default: 40. - /// \param[in] f_min Minimum frequency. Default: 0.0. - /// \param[in] f_max Maximum frequency. Default: 0.0, will be set to sample_rate // 2. - /// \param[in] dct_type Type of DCT (discrete cosine transform) to use. Default: 2. - /// \param[in] norm Norm to use. Default: NormMode::kOrtho. - /// \param[in] log_lf Whether to use log-lf spectrograms instead of db-scaled. Default: false. - /// \param[in] n_fft Size of FFT, creates n_fft // 2 + 1 bins. Default: 400. - /// \param[in] win_length Window size. Default: 0, will be set to n_fft. - /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will be set to win_length // 2. - /// \param[in] pad Two sided padding of signal. Default: 0. - /// \param[in] window A function to create a window tensor that is applied/multiplied to - /// each frame/window. Default: WindowType::kHann. - /// \param[in] power Exponent for the magnitude spectrogram, (must be > 0) e.g., 1 for energy, 2 - /// for power, etc. Default: 2.0. - /// \param[in] normalized Whether to normalize by magnitude after stft. Default: false - /// \param[in] center Whether to pad waveform on both sides so that the tt-th frame is centered at - /// time t t*hop_length. Default: true. - /// \param[in] pad_mode Controls the padding method used when center is True. Default: - /// BorderType::kReflect. - /// \param[in] onesided Controls whether to return half of results to avoid - /// redundancy. Default: true. - explicit LFCC(int32_t sample_rate = 16000, int32_t n_filter = 128, int32_t n_lfcc = 40, float f_min = 0.0, - float f_max = 0.0, int32_t dct_type = 2, NormMode norm = NormMode::kOrtho, bool log_lf = false, - int32_t n_fft = 400, int32_t win_length = 0, int32_t hop_length = 0, int32_t pad = 0, - WindowType window = WindowType::kHann, float power = 2.0, bool normalized = false, bool center = true, - BorderType pad_mode = BorderType::kReflect, bool onesided = true); - - /// \brief Destructor. - ~LFCC() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Design filter. Similar to SoX implementation. -class DATASET_API LFilter final : public TensorTransform { - public: - /// \param[in] a_coeffs Numerator coefficients of difference equation of dimension of (n_order + 1). - /// Lower delays coefficients are first, e.g. [a0, a1, a2, ...]. - /// Must be same size as b_coeffs (pad with 0's as necessary). - /// \param[in] b_coeffs Numerator coefficients of difference equation of dimension of (n_order + 1). - /// Lower delays coefficients are first, e.g. [b0, b1, b2, ...]. - /// Must be same size as a_coeffs (pad with 0's as necessary). - /// \param[in] clamp If True, clamp the output signal to be in the range [-1, 1]. Default: True. - explicit LFilter(const std::vector &a_coeffs, const std::vector &b_coeffs, bool clamp = true); - - /// \brief Destructor. - ~LFilter() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. 
- /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Creates a linear triangular filterbank. -/// \param output Tensor of a linear triangular filterbank. -/// \param n_freqs: Number of frequency. -/// \param f_min: Minimum of frequency in Hz. -/// \param f_max: Maximum of frequency in Hz. -/// \param n_filter: Number of (linear) triangular filter. -/// \param sample_rate: Sample rate. -/// \return Status code. -Status DATASET_API LinearFbanks(MSTensor *output, int32_t n_freqs, float f_min, float f_max, int32_t n_filter, - int32_t sample_rate); - -/// \brief Design biquad lowpass filter and perform filtering. Similar to SoX implementation. -class DATASET_API LowpassBiquad final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. - /// \param[in] cutoff_freq Filter cutoff frequency. - /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. - LowpassBiquad(int32_t sample_rate, float cutoff_freq, float Q = 0.707); - - /// \brief Destructor. - ~LowpassBiquad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Separate a complex-valued spectrogram with shape (..., 2) into its magnitude and phase. -class DATASET_API Magphase final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] power Power of the norm, which must be non-negative. Default: 1.0. - explicit Magphase(float power); - - /// \brief Destructor. - ~Magphase() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief MaskAlongAxis TensorTransform. -/// \note Tensor operation to mask the input tensor along axis. -class MaskAlongAxis final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] mask_start Starting position of the mask, which must be non negative. - /// \param[in] mask_width The width of the mask, which must be positive. - /// \param[in] mask_value Value to assign to the masked columns. - /// \param[in] axis Axis to apply masking on (1 for frequency and 2 for time). - MaskAlongAxis(int32_t mask_start, int32_t mask_width, float mask_value, int32_t axis); - - /// \brief Destructor. - ~MaskAlongAxis() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief MaskAlongAxisIID TensorTransform. -/// \note Apply a mask along axis. -class MaskAlongAxisIID final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] mask_param Number of columns to be masked, will be uniformly sampled from [0, mask_param], - /// must be non negative. - /// \param[in] mask_value Value to assign to the masked columns. - /// \param[in] axis Axis to apply masking on (1 for frequency and 2 for time). 
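// Editorial sketch (not part of this patch): masking a spectrogram along one axis with the
// classes documented above; per the docs, axis = 1 masks frequency bins and axis = 2 masks
// time frames. Include path and namespace spelling are assumptions, as in the other sketches.
#include "include/dataset/audio.h"

void BuildSpectrogramMasks() {
  namespace audio = mindspore::lite::dataset::audio;
  // Fixed mask: zero out 8 frequency bins starting at bin 10.
  audio::MaskAlongAxis freq_mask(/*mask_start=*/10, /*mask_width=*/8, /*mask_value=*/0.0, /*axis=*/1);
  // Random-width mask: width drawn uniformly from [0, 20] along the time axis.
  audio::MaskAlongAxisIID time_mask(/*mask_param=*/20, /*mask_value=*/0.0, /*axis=*/2);
}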
- MaskAlongAxisIID(int32_t mask_param, float mask_value, int32_t axis); - - /// \brief Destructor. - ~MaskAlongAxisIID() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr<TensorOperation> Parse() override; - - private: - struct Data; - std::shared_ptr<Data> data_; -}; - -/// \brief MelScale TensorTransform. -/// \notes Convert normal STFT to STFT at the Mel scale. -class DATASET_API MelScale final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] n_mels Number of mel filters, which must be positive. Default: 128. - /// \param[in] sample_rate Sample rate of the signal, the value can't be zero. Default: 16000. - /// \param[in] f_min Minimum frequency, which must be non-negative. Default: 0.0. - /// \param[in] f_max Maximum frequency, which must be positive. Default: 0.0, will be set to sample_rate / 2. - /// \param[in] n_stft Number of bins in STFT, which must be positive. Default: 201. - /// \param[in] norm Type of norm, value should be NormType::kSlaney or NormType::kNone. If norm is NormType::kSlaney, - /// divide the triangle mel weight by the width of the mel band. Default: NormType::kNone. - /// \param[in] mel_type Type of mel, value should be MelType::kHtk or MelType::kSlaney. Default: MelType::kHtk. - explicit MelScale(int32_t n_mels = 128, int32_t sample_rate = 16000, float f_min = 0.0, float f_max = 0.0, - int32_t n_stft = 201, NormType norm = NormType::kNone, MelType mel_type = MelType::kHtk); - - /// \brief Destructor. - ~MelScale() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr<TensorOperation> Parse() override; - - private: - struct Data; - std::shared_ptr<Data> data_; -}; - -/// \brief Create a frequency transformation matrix with shape (n_freqs, n_mels). -/// \param[in] output Tensor of the frequency transformation matrix. -/// \param[in] n_freqs Number of frequencies to highlight/apply. -/// \param[in] f_min Minimum frequency (Hz). -/// \param[in] f_max Maximum frequency (Hz). -/// \param[in] n_mels Number of mel filterbanks. -/// \param[in] sample_rate Sample rate of the audio waveform. -/// \param[in] norm Norm to use, can be NormType::kNone or NormType::kSlaney. Default: NormType::kNone. -/// \param[in] mel_type Scale to use, can be MelType::kHtk or MelType::kSlaney. Default: MelType::kHtk. -/// \return Status code. -Status DATASET_API MelscaleFbanks(MSTensor *output, int32_t n_freqs, float f_min, float f_max, int32_t n_mels, - int32_t sample_rate, NormType norm = NormType::kNone, - MelType mel_type = MelType::kHtk); - -/// \brief Create MelSpectrogram for a raw audio signal. -class DATASET_API MelSpectrogram final : public TensorTransform { - public: - /// \param[in] sample_rate Sample rate of audio signal. Default: 16000. - /// \param[in] n_fft Size of FFT, creates `n_fft // 2 + 1` bins. Default: 400. - /// \param[in] win_length Window size. Default: 0, will be set to `n_fft` . - /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will be set to `win_length // 2` . - /// \param[in] f_min Minimum frequency. Default: 0.0. - /// \param[in] f_max Maximum frequency. Default: 0.0. - /// \param[in] pad Two sided padding of signal. Default: 0. - /// \param[in] n_mels Number of mel filterbanks. Default: 128.
- /// \param[in] window A function to create a window tensor that is applied/multiplied to each frame/window. - /// Default: WindowType::kHann. - /// \param[in] power Exponent for the magnitude spectrogram, (must be > 0) e.g., 1 for energy, 2 for power, etc. - /// Default: 2.0. - /// \param[in] normalized Whether to normalize by magnitude after stft Default: false. - /// \param[in] center Whether to pad waveform on both sides. Default: true. - /// \param[in] pad_mode Controls the padding method used when center is True. Default: BorderType::kReflect. - /// \param[in] onesided Controls whether to return half of results to avoid redundancy. Default: true. - /// \param[in] norm If 'slaney', divide the triangular mel weights by the width of the mel band (area normalization). - /// Default: NormType::kNone. - /// \param[in] mel_scale Scale to use: htk or slaney. Default: MelType::kHtk. - explicit MelSpectrogram(int32_t sample_rate = 16000, int32_t n_fft = 400, int32_t win_length = 0, - int32_t hop_length = 0, float f_min = 0.0, float f_max = 0.0, int32_t pad = 0, - int32_t n_mels = 128, WindowType window = WindowType::kHann, float power = 2.0, - bool normalized = false, bool center = true, BorderType pad_mode = BorderType::kReflect, - bool onesided = true, NormType norm = NormType::kNone, MelType mel_scale = MelType::kHtk); - - /// \brief Destructor. - ~MelSpectrogram() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Create MFCC for a raw audio signal. -class DATASET_API MFCC final : public TensorTransform { - public: - /// \param[in] sample_rate Sample rate of audio signal. Default: 16000. - /// \param[in] n_mfcc Number of mfc coefficients to retain. Default: 40. - /// \param[in] dct_type Type of DCT (discrete cosine transform) to use. Default: 2. - /// \param[in] norm If 'slaney', divide the triangular mel weights by the width of the mel band (area normalization). - /// Default: NormMode::kOrtho. - /// \param[in] log_mels Whether to use log-mel spectrograms instead of db-scaled. Default: false. - /// \param[in] n_fft Size of FFT, creates n_fft // 2 + 1 bins. Default: 400. - /// \param[in] win_length Window size. Default: 0. - /// \param[in] hop_length Length of hop between STFT windows. Default: 0. - /// \param[in] f_min Minimum frequency. Default: 0.0. - /// \param[in] f_max Maximum frequency. Default: 0.0. - /// \param[in] pad Two sided padding of signal. Default: 0. - /// \param[in] n_mels Number of mel filterbanks. Default: 128. - /// \param[in] window A function to create a window tensor that is applied/multiplied to each frame/window. - /// Default: WindowType::kHann. - /// \param[in] power Exponent for the magnitude spectrogram, (must be > 0) e.g., 1 for energy, 2 for power, etc. - /// Default: 2.0. - /// \param[in] normalized Whether to normalize by magnitude after stft. Default: false. - /// \param[in] center Whether to pad waveform on both sides. Default: true. - /// \param[in] pad_mode Controls the padding method used when center is True. Default: BorderType::kReflect. - /// \param[in] onesided Controls whether to return half of results to avoid redundancy. Default: true. - /// \param[in] norm_mel Norm to use. Default: NormType::kNone. - /// \param[in] mel_scale Scale to use: htk or slaney. Default: MelType::kHtk. 
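// Editorial sketch (not part of this patch): a mel-based front end built from the
// constructors documented above, keeping the default STFT and mel settings; 16 kHz input
// with 13 retained MFCC coefficients is an illustrative choice, not a recommendation.
#include "include/dataset/audio.h"

void BuildMelFrontEnd() {
  namespace audio = mindspore::lite::dataset::audio;
  audio::MelSpectrogram mel_spec(/*sample_rate=*/16000);   // defaults: n_fft = 400, n_mels = 128
  audio::MFCC mfcc(/*sample_rate=*/16000, /*n_mfcc=*/13);  // other parameters keep their defaults
}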
- explicit MFCC(int32_t sample_rate = 16000, int32_t n_mfcc = 40, int32_t dct_type = 2, - NormMode norm = NormMode::kOrtho, bool log_mels = false, int32_t n_fft = 400, int32_t win_length = 0, - int32_t hop_length = 0, float f_min = 0.0, float f_max = 0.0, int32_t pad = 0, int32_t n_mels = 128, - WindowType window = WindowType::kHann, float power = 2.0, bool normalized = false, bool center = true, - BorderType pad_mode = BorderType::kReflect, bool onesided = true, NormType norm_mel = NormType::kNone, - MelType mel_scale = MelType::kHtk); - - /// \brief Destructor. - ~MFCC() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief MuLawDecoding TensorTransform. -/// \note Decode mu-law encoded signal. -class DATASET_API MuLawDecoding final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] quantization_channels Number of channels, which must be positive. Default: 256. - explicit MuLawDecoding(int32_t quantization_channels = 256); - - /// \brief Destructor. - ~MuLawDecoding() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief MuLawEncoding TensorTransform. -/// \note Encode signal based on mu-law companding. -class DATASET_API MuLawEncoding final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] quantization_channels Number of channels, which must be positive. Default: 256. - explicit MuLawEncoding(int32_t quantization_channels = 256); - - /// \brief Destructor. - ~MuLawEncoding() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Overdrive TensorTransform. -class DATASET_API Overdrive final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] gain Coefficient of overload in dB, in range of [0, 100]. Default: 20.0. - /// \param[in] color Coefficient of translation, in range of [0, 100]. Default: 20.0. - explicit Overdrive(float gain = 20.0, float color = 20.0); - - /// \brief Destructor. - ~Overdrive() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Phaser TensorTransform. -class DATASET_API Phaser final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz). - /// \param[in] gain_in Desired input gain at the boost (or attenuation) in dB. - /// Allowed range of values is [0, 1]. Default: 0.4. - /// \param[in] gain_out Desired output gain at the boost (or attenuation) in dB. - /// Allowed range of values is [0, 1e9]. Default: 0.74. - /// \param[in] delay_ms Desired delay in milli seconds. Allowed range of values is [0, 5]. Default: 3.0. - /// \param[in] decay Desired decay relative to gain-in. 
Allowed range of values is [0, 0.99]. Default: 0.4. - /// \param[in] mod_speed Modulation speed in Hz. Allowed range of values is [0.1, 2]. Default: 0.5. - /// \param[in] sinusoidal If true, use sinusoidal modulation (preferable for multiple instruments). - /// If false, use triangular modulation (gives single instruments a sharper phasing effect). Default: true. - explicit Phaser(int32_t sample_rate, float gain_in = 0.4, float gain_out = 0.74, float delay_ms = 3.0, - float decay = 0.4, float mod_speed = 0.5, bool sinusoidal = true); - - /// \brief Destructor. - ~Phaser() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief PhaseVocoder TensorTransform -/// \notes Given a STFT tensor, speed up in time without modifying pitch by factor of rate. -class DATASET_API PhaseVocoder final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] rate Speed-up factor. - /// \param[in] phase_advance Expected phase advance in each bin in shape of (freq, 1). - PhaseVocoder(float rate, const MSTensor &phase_advance); - - /// \brief Destructor. - ~PhaseVocoder() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -// \brief Shift the pitch of a waveform by 'n_steps' steps. -class DATASET_API PitchShift final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of "waveform". Default: 0. - /// \param[in] n_steps The (fractional) steps to shift "waveform". Default: 0. - /// \param[in] bins_per_octave The number of steps per octave. Default: 12. - /// \param[in] n_fft Size of FFT, creates "n_fft // 2 + 1" bins. Default: 512. - /// \param[in] win_length Window size. Default: 0, will be set to `n_fft` . - /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will be set to `win_length // 4` . - /// \param[in] window Window tensor that is applied/multiplied to each frame/window. Default: WindowType::kHann. - explicit PitchShift(int32_t sample_rate = 0, int32_t n_steps = 0, int32_t bins_per_octave = 12, int32_t n_fft = 512, - int32_t win_length = 0, int32_t hop_length = 0, WindowType window = WindowType::kHann); - - /// \brief Destructor. - ~PitchShift() override = default; - - protected: - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Resample TensorTransform. -/// \notes Resample a signal from one frequency to another. A sampling method can be given. -class DATASET_API Resample : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] orig_freq The original frequency of the signal, which must be positive. Default: 16000.0. - /// \param[in] new_freq The desired frequency, which must be positive. Default: 16000.0. - /// \param[in] resample_method The resampling method, which can be ResampleMethod::kSincInterpolation - /// and ResampleMethod::kKaiserWindow. Default: ResampleMethod::kSincInterpolation. - /// \param[in] lowpass_filter_width Controls the sharpness of the filter, more means sharper but less efficient, - /// which must be positive. Default: 6. 
- /// \param[in] rolloff The roll-off frequency of the filter, as a fraction of the Nyquist. Lower values - /// reduce anti-aliasing, but also reduce some of the highest frequencies, range: (0, 1]. Default: 0.99. - /// \param[in] beta The shape parameter used for kaiser window. Default: 14.769656459379492. - explicit Resample(float orig_freq = 16000.0, float new_freq = 16000.0, - ResampleMethod resample_method = ResampleMethod::kSincInterpolation, - int32_t lowpass_filter_width = 6, float rolloff = 0.99, float beta = 14.769656459379492); - - /// \brief Destructor. - ~Resample() override = default; - - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Apply RIAA vinyl playback equalization. -class DATASET_API RiaaBiquad final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), - /// can only be one of 44100, 48000, 88200, 96000. - explicit RiaaBiquad(int32_t sample_rate); - - /// \brief Destructor. - ~RiaaBiquad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Apply sliding-window cepstral mean (and optionally variance) normalization per utterance. -class DATASET_API SlidingWindowCmn final : public TensorTransform { - public: - /// \brief Constructor of SlidingWindowCmnOp. - /// \param[in] cmn_window The window in frames for running average CMN computation. Default: 600. - /// \param[in] min_cmn_window The minimum CMN window. Only applicable if center is false, ignored if center - /// is true. Default: 100. - /// \param[in] center If true, use a window centered on the current frame. If false, window is to the left. - /// Default: false. - /// \param[in] norm_vars If true, normalize variance to one. Default: false. - explicit SlidingWindowCmn(int32_t cmn_window = 600, int32_t min_cmn_window = 100, bool center = false, - bool norm_vars = false); - - /// \brief Destructor. - ~SlidingWindowCmn() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - struct Data; - std::shared_ptr data_; -}; - -/// \brief Create a spectral centroid from an audio signal. -class DATASET_API SpectralCentroid : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz). - /// \param[in] n_fft Size of FFT, creates n_fft / 2 + 1 bins. Default: 400. - /// \param[in] win_length Window size. Default: 0, will use n_fft. - /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will use win_length / 2. - /// \param[in] pad Two sided padding of signal. Default: 0. - /// \param[in] window Window function that is applied/multiplied to each frame/window, - /// which can be WindowType::kBartlett, WindowType::kBlackman, WindowType::kHamming, - /// WindowType::kHann or WindowType::kKaiser. Default: WindowType::kHann. 
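// Editorial sketch (not part of this patch): downsampling 44.1 kHz audio to 16 kHz with the
// Resample transform documented above, keeping the default sinc-interpolation method and
// filter settings. Include path and namespace spelling are assumptions, as noted earlier.
#include "include/dataset/audio.h"

void BuildResample() {
  namespace audio = mindspore::lite::dataset::audio;
  audio::Resample to_16k(/*orig_freq=*/44100.0, /*new_freq=*/16000.0);
}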
- explicit SpectralCentroid(int32_t sample_rate, int32_t n_fft = 400, int32_t win_length = 0, int32_t hop_length = 0, - int32_t pad = 0, WindowType window = WindowType::kHann); - - ~SpectralCentroid() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - int32_t sample_rate_; - int32_t n_fft_; - int32_t win_length_; - int32_t hop_length_; - int32_t pad_; - WindowType window_; - struct Data; - std::shared_ptr data_; -}; - -/// \brief Create a spectrogram from an audio signal. -class DATASET_API Spectrogram : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] n_fft Size of FFT, creates n_fft / 2 + 1 bins. Default: 400. - /// \param[in] win_length Window size. Default: 0, will use n_fft. - /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will use win_length / 2. - /// \param[in] pad Two sided padding of signal. Default: 0. - /// \param[in] window Window function that is applied/multiplied to each frame/window, - /// which can be WindowType::kBartlett, WindowType::kBlackman, WindowType::kHamming, - /// WindowType::kHann or WindowType::kKaiser. Default: WindowType::kHann. - /// \param[in] power Exponent for the magnitude spectrogram, which must be greater than or equal to 0. Default: 2.0. - /// \param[in] normalized Whether to normalize by magnitude after stft. Default: false. - /// \param[in] center Whether to pad waveform on both sides. Default: true. - /// \param[in] pad_mode Controls the padding method used when center is true, - /// which can be BorderType::kReflect, BorderType::kConstant, BorderType::kEdge, - /// BorderType::kSymmetric. Default: BorderType::kReflect. - /// \param[in] onesided Controls whether to return half of results to avoid redundancy. Default: true. - explicit Spectrogram(int32_t n_fft = 400, int32_t win_length = 0, int32_t hop_length = 0, int32_t pad = 0, - WindowType window = WindowType::kHann, float power = 2.0, bool normalized = false, - bool center = true, BorderType pad_mode = BorderType::kReflect, bool onesided = true); - - /// \brief Destructor. - ~Spectrogram() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr Parse() override; - - private: - int32_t n_fft_; - int32_t win_length_; - int32_t hop_length_; - int32_t pad_; - WindowType window_; - float power_; - bool normalized_; - bool center_; - BorderType pad_mode_; - bool onesided_; - struct Data; - std::shared_ptr data_; -}; - -/// \brief TimeMasking TensorTransform. -/// \notes Apply masking to a spectrogram in the time domain. -class DATASET_API TimeMasking final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] iid_masks Whether to apply different masks to each example. - /// \param[in] time_mask_param Maximum possible length of the mask, range: [0, time_length]. Default: 0. - /// Indices uniformly sampled from [0, time_mask_param]. - /// Mask width when iid_masks=true. - /// \param[in] mask_start Mask start when iid_masks=true, range: [0, time_length-time_mask_param]. Default: 0. - /// \param[in] mask_value Mask value. - explicit TimeMasking(bool iid_masks = false, int32_t time_mask_param = 0, int32_t mask_start = 0, - float mask_value = 0.0); - - /// \brief Destructor. 
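// Editorial sketch (not part of this patch): a SpecAugment-style augmentation chain built
// from the Spectrogram, FrequencyMasking and TimeMasking transforms documented in this
// header. How the three objects are composed into a pipeline (e.g. a dataset Map) is an
// assumption and is not shown.
#include "include/dataset/audio.h"

void BuildSpecAugment() {
  namespace audio = mindspore::lite::dataset::audio;
  audio::Spectrogram spectrogram(/*n_fft=*/400);  // power spectrogram with default settings
  audio::FrequencyMasking freq_mask(/*iid_masks=*/true, /*frequency_mask_param=*/27);
  audio::TimeMasking time_mask(/*iid_masks=*/true, /*time_mask_param=*/40);
}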
- ~TimeMasking() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr<TensorOperation> Parse() override; - - private: - struct Data; - std::shared_ptr<Data> data_; -}; - -/// \brief TimeStretch TensorTransform. -/// \notes Stretch STFT in time at a given rate, without changing the pitch. -class DATASET_API TimeStretch final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] hop_length Length of hop between STFT windows. Default: None, will use ((n_freq - 1) * 2) // 2. - /// \param[in] n_freq Number of filter banks from STFT. Default: 201. - /// \param[in] fixed_rate Rate to speed up or slow down the input in time. - /// Default: std::numeric_limits<float>::quiet_NaN(), will keep the original rate. - explicit TimeStretch(float hop_length = std::numeric_limits<float>::quiet_NaN(), int n_freq = 201, - float fixed_rate = std::numeric_limits<float>::quiet_NaN()); - - /// \brief Destructor. - ~TimeStretch() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr<TensorOperation> Parse() override; - - private: - struct Data; - std::shared_ptr<Data> data_; -}; - -/// \brief Design a treble tone-control effect. -class DATASET_API TrebleBiquad final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. - /// \param[in] gain Desired gain at the boost (or attenuation) in dB. - /// \param[in] central_freq Central frequency (in Hz). Default: 3000.0. - /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. - TrebleBiquad(int32_t sample_rate, float gain, float central_freq = 3000.0, float Q = 0.707); - - /// \brief Destructor. - ~TrebleBiquad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr<TensorOperation> Parse() override; - - private: - struct Data; - std::shared_ptr<Data> data_; -}; - -/// \brief Vad TensorTransform. -/// \notes Attempt to trim silent background sounds from the end of the voice recording. -class DATASET_API Vad final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] sample_rate Sample rate of audio signal. - /// \param[in] trigger_level The measurement level used to trigger activity detection. Default: 7.0. - /// \param[in] trigger_time The time constant (in seconds) used to help ignore short sounds. Default: 0.25. - /// \param[in] search_time The amount of audio (in seconds) to search for quieter/shorter sounds to include prior to - /// the detected trigger point. Default: 1.0. - /// \param[in] allowed_gap The allowed gap (in seconds) between quieter/shorter sounds to include prior to the - /// detected trigger point. Default: 0.25. - /// \param[in] pre_trigger_time The amount of audio (in seconds) to preserve before the trigger point and any found - /// quieter/shorter bursts. Default: 0.0. - /// \param[in] boot_time The time for the initial noise estimate. Default: 0.35. - /// \param[in] noise_up_time Time constant used by the adaptive noise estimator, when the noise level is increasing. - /// Default: 0.1. - /// \param[in] noise_down_time Time constant used by the adaptive noise estimator, when the noise level is decreasing.
- /// Default: 0.01. - /// \param[in] noise_reduction_amount The amount of noise reduction used in the detection algorithm. Default: 1.35. - /// \param[in] measure_freq The frequency of the algorithm’s processing. Default: 20.0. - /// \param[in] measure_duration The duration of measurement. Default: 0, use twice the measurement period. - /// \param[in] measure_smooth_time The time constant used to smooth spectral measurements. Default: 0.4. - /// \param[in] hp_filter_freq The "Brick-wall" frequency of high-pass filter applied at the input to the detector - /// algorithm. Default: 50.0. - /// \param[in] lp_filter_freq The "Brick-wall" frequency of low-pass filter applied at the input to the detector - /// algorithm. Default: 6000.0. - /// \param[in] hp_lifter_freq The "Brick-wall" frequency of high-pass lifter applied at the input to the detector - /// algorithm. Default: 150.0. - /// \param[in] lp_lifter_freq The "Brick-wall" frequency of low-pass lifter applied at the input to the detector - /// algorithm. Default: 2000.0. - explicit Vad(int32_t sample_rate, float trigger_level = 7.0, float trigger_time = 0.25, float search_time = 1.0, - float allowed_gap = 0.25, float pre_trigger_time = 0.0, float boot_time = 0.35, - float noise_up_time = 0.1, float noise_down_time = 0.01, float noise_reduction_amount = 1.35, - float measure_freq = 20.0, float measure_duration = 0.0, float measure_smooth_time = 0.4, - float hp_filter_freq = 50.0, float lp_filter_freq = 6000.0, float hp_lifter_freq = 150.0, - float lp_lifter_freq = 2000.0); - - /// \brief Destructor. - ~Vad() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr<TensorOperation> Parse() override; - - private: - struct Data; - std::shared_ptr<Data> data_; -}; - -/// \brief Vol TensorTransform. -/// \notes Add a volume to a waveform. -class DATASET_API Vol final : public TensorTransform { - public: - /// \brief Constructor. - /// \param[in] gain Gain value, varies according to the value of gain_type. If gain_type is GainType::kAmplitude, - /// gain must be greater than or equal to zero. If gain_type is GainType::kPower, gain must be greater than zero. - /// If gain_type is GainType::kDb, there is no limit for gain. - /// \param[in] gain_type Type of gain, should be one of [GainType::kAmplitude, GainType::kDb, GainType::kPower]. - explicit Vol(float gain, GainType gain_type = GainType::kAmplitude); - - /// \brief Destructor. - ~Vol() override = default; - - protected: - /// \brief Function to convert TensorTransform object into a TensorOperation object. - /// \return Shared pointer to TensorOperation object. - std::shared_ptr<TensorOperation> Parse() override; - - private: - struct Data; - std::shared_ptr<Data> data_; -}; -} // namespace audio -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_AUDIO_H_ +/** + * Copyright 2021-2024 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_AUDIO_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_AUDIO_H_ + +#include +#include +#include +#include +#include +#include + +#include "include/api/dual_abi_helper.h" +#include "include/api/status.h" +#include "include/api/visible.h" +#include "include/dataset/constants.h" +#include "include/dataset/transforms.h" + +namespace mindspore::lite { +namespace dataset { +class TensorOperation; + +// Transform operations for performing computer audio. +namespace audio { +/// \brief Compute the angle of complex tensor input. +class DATASET_API Angle final : public TensorTransform { + public: + /// \brief Constructor. + Angle(); + + /// \brief Destructor. + ~Angle() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; +}; + +/// \brief Design two-pole allpass filter. Similar to SoX implementation. +class DATASET_API AllpassBiquad final : public TensorTransform { + public: + /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. + /// \param[in] central_freq Central frequency (in Hz). + /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. + explicit AllpassBiquad(int32_t sample_rate, float central_freq, float Q = 0.707); + + /// \brief Destructor. + ~AllpassBiquad() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief AmplitudeToDB TensorTransform. +/// \notes Turn a tensor from the power/amplitude scale to the decibel scale. +class DATASET_API AmplitudeToDB final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] stype Scale of input tensor, must be one of [ScaleType::kPower, ScaleType::kMagnitude]. + /// Default: ScaleType::kPower. + /// \param[in] ref_value Calculate db_multiplier. Default: 1.0. + /// \param[in] amin Minimum threshold for input tensor and ref_value. It must be greater than zero. Default: 1e-10. + /// \param[in] top_db Decibels cut-off value. It must be greater than or equal to zero. Default: 80.0. + explicit AmplitudeToDB(ScaleType stype = ScaleType::kPower, float ref_value = 1.0, float amin = 1e-10, + float top_db = 80.0); + + /// \brief Destructor. + ~AmplitudeToDB() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Design two-pole band filter. +class DATASET_API BandBiquad final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. + /// \param[in] central_freq Central frequency (in Hz). + /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. + /// \param[in] noise Choose alternate mode for un-pitched audio or mode oriented to pitched audio. Default: False. 
+ explicit BandBiquad(int32_t sample_rate, float central_freq, float Q = 0.707, bool noise = false); + + /// \brief Destructor. + ~BandBiquad() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Design two-pole band-pass filter. +class DATASET_API BandpassBiquad final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. + /// \param[in] central_freq Central frequency (in Hz). + /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. + /// \param[in] const_skirt_gain, If True, uses a constant skirt gain (peak gain = Q). If False, uses a + /// constant 0dB peak gain. Default: False. + explicit BandpassBiquad(int32_t sample_rate, float central_freq, float Q = 0.707, bool const_skirt_gain = false); + + /// \brief Destructor. + ~BandpassBiquad() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Design two-pole band-reject filter. Similar to SoX implementation. +class DATASET_API BandrejectBiquad final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. + /// \param[in] central_freq Central frequency (in Hz). + /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. + explicit BandrejectBiquad(int32_t sample_rate, float central_freq, float Q = 0.707); + + /// \brief Destructor. + ~BandrejectBiquad() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Design a bass tone-control effect. +class DATASET_API BassBiquad final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. + /// \param[in] gain Desired gain at the boost (or attenuation) in dB. + /// \param[in] central_freq Central frequency (in Hz). Default: 100.0. + /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. + explicit BassBiquad(int32_t sample_rate, float gain, float central_freq = 100.0, float Q = 0.707); + + /// \brief Destructor. + ~BassBiquad() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Perform a biquad filter of input tensor. +class DATASET_API Biquad final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] b0 Numerator coefficient of current input, x[n]. + /// \param[in] b1 Numerator coefficient of input one time step ago x[n-1]. 
+ /// \param[in] b2 Numerator coefficient of input two time steps ago x[n-2]. + /// \param[in] a0 Denominator coefficient of current output y[n], the value can't be zero, typically 1. + /// \param[in] a1 Denominator coefficient of current output y[n-1]. + /// \param[in] a2 Denominator coefficient of current output y[n-2]. + explicit Biquad(float b0, float b1, float b2, float a0, float a1, float a2); + + /// \brief Destructor. + ~Biquad() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief ComplexNorm TensorTransform. +/// \notes Compute the norm of complex tensor input. +class DATASET_API ComplexNorm final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] power Power of the norm, which must be non-negative. Default: 1.0. + explicit ComplexNorm(float power = 1.0); + + /// \brief Destructor. + ~ComplexNorm() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief ComputeDeltas Transform. +/// \note Compute delta coefficients of a spectrogram. +class DATASET_API ComputeDeltas final : public TensorTransform { + public: + /// \brief Construct a new Compute Deltas object. + /// \f[ + /// d_{t}=\frac{{\textstyle\sum_{n=1}^{N}}n(c_{t+n}-c_{t-n})}{2{\textstyle\sum_{n=1}^{N}}n^{2}} + /// \f] + /// \param[in] win_length The window length used for computing delta, must be no less than 3. Default: 5. + /// \param[in] pad_mode Padding mode. Can be one of BorderType::kConstant, BorderType::kEdge, + /// BorderType::kReflect or BorderType::kSymmetric. Default: BorderType::kEdge. + explicit ComputeDeltas(int32_t win_length = 5, BorderType pad_mode = BorderType::kEdge); + + /// \brief Destructor. + ~ComputeDeltas() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Apply contrast effect. +class DATASET_API Contrast final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] enhancement_amount Controls the amount of the enhancement. Default: 75.0. + explicit Contrast(float enhancement_amount = 75.0); + + /// \brief Destructor. + ~Contrast() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Turn a waveform from the decibel scale to the power/amplitude scale. +class DATASET_API DBToAmplitude final : public TensorTransform { + public: + /// \brief Constructor + /// \param[in] ref Reference which the output will be scaled by. + /// \param[in] power If power equals 1, will compute DB to power. If 0.5, will compute DB to amplitude. + explicit DBToAmplitude(float ref, float power); + + /// \brief Destructor. + ~DBToAmplitude() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. 
+  /// \return Shared pointer to TensorOperation object.
+  std::shared_ptr<TensorOperation> Parse() override;
+
+ private:
+  struct Data;
+  std::shared_ptr<Data> data_;
+};
+
+/// \brief Apply a DC shift to the audio.
+class DATASET_API DCShift : public TensorTransform {
+ public:
+  /// \brief Constructor
+  /// \param[in] shift Indicates the amount to shift the audio, the value must be in the range [-2.0, 2.0].
+  /// \param[in] limiter_gain Used only on peaks to prevent clipping.
+  DCShift(float shift, float limiter_gain);
+
+  /// \brief Constructor
+  /// \param[in] shift Indicates the amount to shift the audio.
+  /// \note This constructor will use `shift` as `limiter_gain`.
+  explicit DCShift(float shift);
+
+  /// \brief Destructor.
+  ~DCShift() override = default;
+
+ protected:
+  /// \brief Function to convert TensorTransform object into a TensorOperation object.
+  /// \return Shared pointer to TensorOperation object.
+  std::shared_ptr<TensorOperation> Parse() override;
+
+ private:
+  struct Data;
+  std::shared_ptr<Data> data_;
+};
+
+/// \brief Create a DCT transformation matrix with shape (n_mels, n_mfcc), normalized depending on norm.
+/// \param[in] output Tensor of the transformation matrix.
+/// \param[in] n_mfcc Number of mfc coefficients to retain, the value must be greater than 0.
+/// \param[in] n_mels Number of mel filterbanks, the value must be greater than 0.
+/// \param[in] norm Norm to use, can be NormMode::kNone or NormMode::kOrtho.
+/// \return Status error code, returns OK if no error encountered.
+Status CreateDct(mindspore::MSTensor *output, int32_t n_mfcc, int32_t n_mels, NormMode norm = NormMode::kNone);
+
+/// \brief Design two-pole deemph filter. Similar to SoX implementation.
+class DATASET_API DeemphBiquad final : public TensorTransform {
+ public:
+  /// \param[in] sample_rate Sampling rate of the waveform, the value can only be 44100 (Hz) or 48000 (Hz).
+  explicit DeemphBiquad(int32_t sample_rate);
+
+  /// \brief Destructor.
+  ~DeemphBiquad() override = default;
+
+ protected:
+  /// \brief Function to convert TensorTransform object into a TensorOperation object.
+  /// \return Shared pointer to TensorOperation object.
+  std::shared_ptr<TensorOperation> Parse() override;
+
+ private:
+  struct Data;
+  std::shared_ptr<Data> data_;
+};
+
+/// \brief Detect pitch frequency.
+class DATASET_API DetectPitchFrequency final : public TensorTransform {
+ public:
+  /// \brief Constructor.
+  /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero.
+  /// \param[in] frame_time Duration of a frame, the value must be greater than zero. Default: 0.01.
+  /// \param[in] win_length The window length for median smoothing (in number of frames), the value must
+  /// be greater than zero. Default: 30.
+  /// \param[in] freq_low Lowest frequency that can be detected (Hz), the value must be greater than zero. Default: 85.
+  /// \param[in] freq_high Highest frequency that can be detected (Hz), the value must be greater than
+  /// zero. Default: 3400.
+  explicit DetectPitchFrequency(int32_t sample_rate, float frame_time = 0.01, int32_t win_length = 30,
+                                int32_t freq_low = 85, int32_t freq_high = 3400);
+
+  /// \brief Destructor.
+  ~DetectPitchFrequency() override = default;
+
+ protected:
+  /// \brief Function to convert TensorTransform object into a TensorOperation object.
+  /// \return Shared pointer to TensorOperation object.
+  std::shared_ptr<TensorOperation> Parse() override;
+
+ private:
+  struct Data;
+  std::shared_ptr<Data> data_;
+};
+
+/// \brief Dither increases the perceived dynamic range of audio stored at a
+/// particular bit-depth by eliminating nonlinear truncation distortion.
+class DATASET_API Dither final : public TensorTransform {
+ public:
+  /// \brief Constructor.
+ /// \param[in] density_function The density function of a continuous random variable. + /// Can be one of DensityFunction::kTPDF (Triangular Probability Density Function), + /// DensityFunction::kRPDF (Rectangular Probability Density Function) or + /// DensityFunction::kGPDF (Gaussian Probability Density Function). Default: DensityFunction::kTPDF. + /// \param[in] noise_shaping A filtering process that shapes the spectral energy of + /// quantisation error. Default: false. + explicit Dither(DensityFunction density_function = DensityFunction::kTPDF, bool noise_shaping = false); + + /// \brief Destructor. + ~Dither() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief EqualizerBiquad TensorTransform. Apply highpass biquad filter on audio. +class DATASET_API EqualizerBiquad final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. + /// \param[in] center_freq Filter's central frequency (in Hz). + /// \param[in] gain Desired gain at the boost (or attenuation) in dB. + /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. + EqualizerBiquad(int32_t sample_rate, float center_freq, float gain, float Q = 0.707); + + /// \brief Destructor. + ~EqualizerBiquad() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Add fade in or/and fade out on the input audio. +class DATASET_API Fade final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] fade_in_len Length of fade-in (time frames), which must be non-negative + /// and no more than the length of waveform. Default: 0. + /// \param[in] fade_out_len Length of fade-out (time frames), which must be non-negative + /// and no more than the length of waveform. Default: 0. + /// \param[in] fade_shape An enum for the fade shape. Default: FadeShape::kLinear. + explicit Fade(int32_t fade_in_len = 0, int32_t fade_out_len = 0, FadeShape fade_shape = FadeShape::kLinear); + + /// \brief Destructor. + ~Fade() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Design IIR forward and backward filter. +class DATASET_API Filtfilt final : public TensorTransform { + public: + /// \param[in] a_coeffs Numerator coefficients of difference equation of dimension of (n_order + 1). + /// Lower delays coefficients are first, e.g. [a0, a1, a2, ...]. + /// Must be same size as b_coeffs (pad with 0's as necessary). + /// \param[in] b_coeffs Numerator coefficients of difference equation of dimension of (n_order + 1). + /// Lower delays coefficients are first, e.g. [b0, b1, b2, ...]. + /// Must be same size as a_coeffs (pad with 0's as necessary). + /// \param[in] clamp If True, clamp the output signal to be in the range [-1, 1]. Default: True. 
+ Filtfilt(const std::vector &a_coeffs, const std::vector &b_coeffs, bool clamp = true); + + /// \brief Destructor. + ~Filtfilt() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Apply a flanger effect to the audio. +class DATASET_API Flanger final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz). + /// \param[in] delay Desired delay in milliseconds (ms), range: [0, 30]. Default: 0.0. + /// \param[in] depth Desired delay depth in milliseconds (ms), range: [0, 10]. Default: 2.0. + /// \param[in] regen Desired regen (feedback gain) in dB., range: [-95, 95]. Default: 0.0. + /// \param[in] width Desired width (delay gain) in dB, range: [0, 100]. Default: 71.0. + /// \param[in] speed Modulation speed in Hz, range: [0.1, 10]. Default: 0.5. + /// \param[in] phase Percentage phase-shift for multi-channel, range: [0, 100]. Default: 25.0. + /// \param[in] modulation Modulation of input tensor, must be one of [Modulation::kSinusoidal, + /// Modulation::kTriangular]. Default:Modulation::kSinusoidal. + /// \param[in] interpolation Interpolation of input tensor, must be one of [Interpolation::kLinear, + /// Interpolation::kQuadratic]. Default:Interpolation::kLinear. + explicit Flanger(int32_t sample_rate, float delay = 0.0, float depth = 2.0, float regen = 0.0, float width = 71.0, + float speed = 0.5, float phase = 25.0, Modulation modulation = Modulation::kSinusoidal, + Interpolation interpolation = Interpolation::kLinear); + + /// \brief Destructor. + ~Flanger() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief FrequencyMasking TensorTransform. +/// \notes Apply masking to a spectrogram in the frequency domain. +class DATASET_API FrequencyMasking final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] iid_masks Whether to apply different masks to each example. + /// \param[in] frequency_mask_param Maximum possible length of the mask, range: [0, freq_length]. Default: 0. + /// Indices uniformly sampled from [0, frequency_mask_param]. + /// Mask width when iid_masks=true. + /// \param[in] mask_start Mask start when iid_masks=true, range: [0, freq_length-frequency_mask_param]. Default: 0. + /// \param[in] mask_value Mask value. + explicit FrequencyMasking(bool iid_masks = false, int32_t frequency_mask_param = 0, int32_t mask_start = 0, + float mask_value = 0.0); + + /// \brief Destructor. + ~FrequencyMasking() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Apply amplification or attenuation to the whole waveform. +class DATASET_API Gain final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] gain_db Gain adjustment in decibels (dB). Default: 1.0. + explicit Gain(float gain_db = 1.0); + + /// \brief Destructor. 
+ ~Gain() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Waveform calculation from linear scalar amplitude spectrogram using GriffinLim transform. +class DATASET_API GriffinLim final : public TensorTransform { + public: + /// \brief Constructor. + /// \notes Calculated by formula: + /// x(n)=\frac{\sum_{m=-\infty}^{\infty} w(m S-n) y_{w}(m S, n)}{\sum_{m=-\infty}^{\infty} w^{2}(m S-n)} + /// where w represents the window function, y represents the reconstructed signal of each frame and x represents + /// the whole signal. + /// \param[in] n_fft Size of FFT. Default: 400. + /// \param[in] n_iter Number of iteration for phase recovery. Default: 32. + /// \param[in] win_length Window size for GriffinLim. Default: 0, will be set to n_fft. + /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will be set to win_length / 2. + /// \param[in] window_type Window type for GriffinLim. Default: WindowType::kHann. + /// \param[in] power Exponent for the magnitude spectrogram. Default: 2.0. + /// \param[in] momentum The momentum for fast Griffin-Lim. Default: 0.99. + /// \param[in] length Length of the expected output waveform. Default: 0.0, will be set to the value of last + /// dimension of the stft matrix. + /// \param[in] rand_init Flag for random phase initialization or all-zero phase initialization. Default: true. + explicit GriffinLim(int32_t n_fft = 400, int32_t n_iter = 32, int32_t win_length = 0, int32_t hop_length = 0, + WindowType window_type = WindowType::kHann, float power = 2.0, float momentum = 0.99, + int32_t length = 0, bool rand_init = true); + + /// \brief Destructor. + ~GriffinLim() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief HighpassBiquad TensorTransform. Apply highpass biquad filter on audio. +class DATASET_API HighpassBiquad final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. + /// \param[in] cutoff_freq Filter cutoff frequency (in Hz). + /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. + HighpassBiquad(int32_t sample_rate, float cutoff_freq, float Q = 0.707); + + /// \brief Destructor. + ~HighpassBiquad() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief InverseMelScale TensorTransform +/// \notes Solve for a normal STFT from a mel frequency STFT, using a conversion matrix. +class DATASET_API InverseMelScale final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] n_stft Number of bins in STFT, must be positive. + /// \param[in] n_mels Number of mel filter, must be positive. Default: 128. + /// \param[in] sample_rate Sample rate of the signal, the value can't be zero. Default: 16000. + /// \param[in] f_min Minimum frequency, must be non-negative. 
Default: 0.0. + /// \param[in] f_max Maximum frequency, must be non-negative. Default: 0.0, will be set to sample_rate / 2. + /// \param[in] max_iter Maximum number of optimization iterations, must be positive. Default: 100000. + /// \param[in] tolerance_loss Value of loss to stop optimization at, must be non-negative. Default: 1e-5. + /// \param[in] tolerance_change Difference in losses to stop optimization at, must be non-negative. Default: 1e-8. + /// \param[in] sgdargs Parameters of SGD optimizer, including lr, momentum. + /// Default: {{"sgd_lr", 0.1}, {"sgd_momentum", 0.0}}. + /// \param[in] norm Type of norm, value should be NormType::kSlaney or NormType::kNone. If norm is NormType::kSlaney, + /// divide the triangle mel weight by the width of the mel band. Default: NormType::kNone. + /// \param[in] mel_type Type of mel, value should be MelType::kHtk or MelType::kSlaney. Default: MelType::kHtk. + explicit InverseMelScale(int32_t n_stft, int32_t n_mels = 128, int32_t sample_rate = 16000, float f_min = 0.0, + float f_max = 0.0, int32_t max_iter = 100000, float tolerance_loss = 1e-5, + float tolerance_change = 1e-8, + const std::map &sgdargs = {{"sgd_lr", 0.1}, {"sgd_momentum", 0.0}}, + NormType norm = NormType::kNone, MelType mel_type = MelType::kHtk); + + /// \brief Destructor. + ~InverseMelScale() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Create an inverse spectrogram to recover an audio signal from a spectrogram. +class DATASET_API InverseSpectrogram final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] length The output length of the waveform. Default: 0, means to output the whole waveform. + /// \param[in] n_fft Size of FFT, creates n_fft // 2 + 1 bins. Default: 400. + /// \param[in] win_length Window size. Default: 0, will be set to `n_fft` . + /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will be set to `win_length // 2` . + /// \param[in] pad Two sided padding of signal. Default: 0. + /// \param[in] window A function to create a window tensor that is applied/multiplied to each frame/window. + /// Default: WindowType::kHann. + /// \param[in] normalized Whether the spectrogram was normalized by magnitude after stft. Default:false. + /// \param[in] center Whether the signal in spectrogram was padded on both sides. Default: true. + /// \param[in] pad_mode Controls the padding method used when center is True. Default: BorderType::kReflect. + /// \param[in] onesided Controls whether spectrogram was used to return half of results to avoid + /// redundancy. Default: true. + explicit InverseSpectrogram(int32_t length = 0, int32_t n_fft = 400, int32_t win_length = 0, int32_t hop_length = 0, + int32_t pad = 0, WindowType window = WindowType::kHann, bool normalized = false, + bool center = true, BorderType pad_mode = BorderType::kReflect, bool onesided = true); + + /// \brief Destructor. + ~InverseSpectrogram() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Create LFCC for a raw audio signal. 
+class DATASET_API LFCC final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sample rate of audio signal. Default: 16000. + /// \param[in] n_filter Number of linear filters to apply. Default: 128. + /// \param[in] n_lfcc Number of lfc coefficients to retain. Default: 40. + /// \param[in] f_min Minimum frequency. Default: 0.0. + /// \param[in] f_max Maximum frequency. Default: 0.0, will be set to sample_rate // 2. + /// \param[in] dct_type Type of DCT (discrete cosine transform) to use. Default: 2. + /// \param[in] norm Norm to use. Default: NormMode::kOrtho. + /// \param[in] log_lf Whether to use log-lf spectrograms instead of db-scaled. Default: false. + /// \param[in] n_fft Size of FFT, creates n_fft // 2 + 1 bins. Default: 400. + /// \param[in] win_length Window size. Default: 0, will be set to n_fft. + /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will be set to win_length // 2. + /// \param[in] pad Two sided padding of signal. Default: 0. + /// \param[in] window A function to create a window tensor that is applied/multiplied to + /// each frame/window. Default: WindowType::kHann. + /// \param[in] power Exponent for the magnitude spectrogram, (must be > 0) e.g., 1 for energy, 2 + /// for power, etc. Default: 2.0. + /// \param[in] normalized Whether to normalize by magnitude after stft. Default: false + /// \param[in] center Whether to pad waveform on both sides so that the tt-th frame is centered at + /// time t t*hop_length. Default: true. + /// \param[in] pad_mode Controls the padding method used when center is True. Default: + /// BorderType::kReflect. + /// \param[in] onesided Controls whether to return half of results to avoid + /// redundancy. Default: true. + explicit LFCC(int32_t sample_rate = 16000, int32_t n_filter = 128, int32_t n_lfcc = 40, float f_min = 0.0, + float f_max = 0.0, int32_t dct_type = 2, NormMode norm = NormMode::kOrtho, bool log_lf = false, + int32_t n_fft = 400, int32_t win_length = 0, int32_t hop_length = 0, int32_t pad = 0, + WindowType window = WindowType::kHann, float power = 2.0, bool normalized = false, bool center = true, + BorderType pad_mode = BorderType::kReflect, bool onesided = true); + + /// \brief Destructor. + ~LFCC() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Design filter. Similar to SoX implementation. +class DATASET_API LFilter final : public TensorTransform { + public: + /// \param[in] a_coeffs Numerator coefficients of difference equation of dimension of (n_order + 1). + /// Lower delays coefficients are first, e.g. [a0, a1, a2, ...]. + /// Must be same size as b_coeffs (pad with 0's as necessary). + /// \param[in] b_coeffs Numerator coefficients of difference equation of dimension of (n_order + 1). + /// Lower delays coefficients are first, e.g. [b0, b1, b2, ...]. + /// Must be same size as a_coeffs (pad with 0's as necessary). + /// \param[in] clamp If True, clamp the output signal to be in the range [-1, 1]. Default: True. + explicit LFilter(const std::vector &a_coeffs, const std::vector &b_coeffs, bool clamp = true); + + /// \brief Destructor. + ~LFilter() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. 
+ /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Creates a linear triangular filterbank. +/// \param output Tensor of a linear triangular filterbank. +/// \param n_freqs: Number of frequency. +/// \param f_min: Minimum of frequency in Hz. +/// \param f_max: Maximum of frequency in Hz. +/// \param n_filter: Number of (linear) triangular filter. +/// \param sample_rate: Sample rate. +/// \return Status code. +Status DATASET_API LinearFbanks(MSTensor *output, int32_t n_freqs, float f_min, float f_max, int32_t n_filter, + int32_t sample_rate); + +/// \brief Design biquad lowpass filter and perform filtering. Similar to SoX implementation. +class DATASET_API LowpassBiquad final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero. + /// \param[in] cutoff_freq Filter cutoff frequency. + /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707. + LowpassBiquad(int32_t sample_rate, float cutoff_freq, float Q = 0.707); + + /// \brief Destructor. + ~LowpassBiquad() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Separate a complex-valued spectrogram with shape (..., 2) into its magnitude and phase. +class DATASET_API Magphase final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] power Power of the norm, which must be non-negative. Default: 1.0. + explicit Magphase(float power); + + /// \brief Destructor. + ~Magphase() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief MaskAlongAxis TensorTransform. +/// \note Tensor operation to mask the input tensor along axis. +class MaskAlongAxis final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] mask_start Starting position of the mask, which must be non negative. + /// \param[in] mask_width The width of the mask, which must be positive. + /// \param[in] mask_value Value to assign to the masked columns. + /// \param[in] axis Axis to apply masking on (1 for frequency and 2 for time). + MaskAlongAxis(int32_t mask_start, int32_t mask_width, float mask_value, int32_t axis); + + /// \brief Destructor. + ~MaskAlongAxis() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief MaskAlongAxisIID TensorTransform. +/// \note Apply a mask along axis. +class MaskAlongAxisIID final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] mask_param Number of columns to be masked, will be uniformly sampled from [0, mask_param], + /// must be non negative. + /// \param[in] mask_value Value to assign to the masked columns. + /// \param[in] axis Axis to apply masking on (1 for frequency and 2 for time). 
+ MaskAlongAxisIID(int32_t mask_param, float mask_value, int32_t axis); + + /// \brief Destructor. + ~MaskAlongAxisIID() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief MelScale TensorTransform. +/// \notes Convert normal STFT to STFT at the Mel scale. +class DATASET_API MelScale final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] n_mels Number of mel filter, which must be positive. Default: 128. + /// \param[in] sample_rate Sample rate of the signal, the value can't be zero. Default: 16000. + /// \param[in] f_min Minimum frequency, which must be non negative. Default: 0.0. + /// \param[in] f_max Maximum frequency, which must be positive. Default: 0.0, will be set to sample_rate / 2. + /// \param[in] n_stft Number of bins in STFT, which must be positive. Default: 201. + /// \param[in] norm Type of norm, value should be NormType::kSlaney or NormType::kNone. If norm is NormType::kSlaney, + /// divide the triangle mel weight by the width of the mel band. Default: NormType::kNone. + /// \param[in] mel_type Type of mel, value should be MelType::kHtk or MelType::kSlaney. Default: MelType::kHtk. + explicit MelScale(int32_t n_mels = 128, int32_t sample_rate = 16000, float f_min = 0.0, float f_max = 0.0, + int32_t n_stft = 201, NormType norm = NormType::kNone, MelType mel_type = MelType::kHtk); + + /// \brief Destructor. + ~MelScale() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Create a frequency transformation matrix with shape (n_freqs, n_mels). +/// \param[in] output Tensor of the frequency transformation matrix. +/// \param[in] n_freqs Number of frequencies to highlight/apply. +/// \param[in] f_min Minimum frequency (Hz). +/// \param[in] f_max Maximum frequency (Hz). +/// \param[in] n_mels Number of mel filterbanks. +/// \param[in] sample_rate Sample rate of the audio waveform. +/// \param[in] norm Norm to use, can be NormType::kNone or NormType::kSlaney. Default: NormType::kNone. +/// \param[in] mel_type Scale to use, can be MelType::kHtk or MelType::kSlaney. Default: MelType::kHtz. +/// \return Status code. +Status DATASET_API MelscaleFbanks(MSTensor *output, int32_t n_freqs, float f_min, float f_max, int32_t n_mels, + int32_t sample_rate, NormType norm = NormType::kNone, + MelType mel_type = MelType::kHtk); + +/// \brief Create MelSpectrogram for a raw audio signal. +class DATASET_API MelSpectrogram final : public TensorTransform { + public: + /// \param[in] sample_rate Sample rate of audio signal. Default: 16000. + /// \param[in] n_fft Size of FFT, creates `n_fft // 2 + 1` bins. Default: 400. + /// \param[in] win_length Window size. Default: 0, will be set to `n_fft` . + /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will be set to `win_length // 2` . + /// \param[in] f_min Minimum frequency. Default: 0.0. + /// \param[in] f_max Maximum frequency. Default: 0.0. + /// \param[in] pad Two sided padding of signal. Default: 0. + /// \param[in] n_mels Number of mel filterbanks. Default: 128. 
+ /// \param[in] window A function to create a window tensor that is applied/multiplied to each frame/window. + /// Default: WindowType::kHann. + /// \param[in] power Exponent for the magnitude spectrogram, (must be > 0) e.g., 1 for energy, 2 for power, etc. + /// Default: 2.0. + /// \param[in] normalized Whether to normalize by magnitude after stft Default: false. + /// \param[in] center Whether to pad waveform on both sides. Default: true. + /// \param[in] pad_mode Controls the padding method used when center is True. Default: BorderType::kReflect. + /// \param[in] onesided Controls whether to return half of results to avoid redundancy. Default: true. + /// \param[in] norm If 'slaney', divide the triangular mel weights by the width of the mel band (area normalization). + /// Default: NormType::kNone. + /// \param[in] mel_scale Scale to use: htk or slaney. Default: MelType::kHtk. + explicit MelSpectrogram(int32_t sample_rate = 16000, int32_t n_fft = 400, int32_t win_length = 0, + int32_t hop_length = 0, float f_min = 0.0, float f_max = 0.0, int32_t pad = 0, + int32_t n_mels = 128, WindowType window = WindowType::kHann, float power = 2.0, + bool normalized = false, bool center = true, BorderType pad_mode = BorderType::kReflect, + bool onesided = true, NormType norm = NormType::kNone, MelType mel_scale = MelType::kHtk); + + /// \brief Destructor. + ~MelSpectrogram() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Create MFCC for a raw audio signal. +class DATASET_API MFCC final : public TensorTransform { + public: + /// \param[in] sample_rate Sample rate of audio signal. Default: 16000. + /// \param[in] n_mfcc Number of mfc coefficients to retain. Default: 40. + /// \param[in] dct_type Type of DCT (discrete cosine transform) to use. Default: 2. + /// \param[in] norm If 'slaney', divide the triangular mel weights by the width of the mel band (area normalization). + /// Default: NormMode::kOrtho. + /// \param[in] log_mels Whether to use log-mel spectrograms instead of db-scaled. Default: false. + /// \param[in] n_fft Size of FFT, creates n_fft // 2 + 1 bins. Default: 400. + /// \param[in] win_length Window size. Default: 0. + /// \param[in] hop_length Length of hop between STFT windows. Default: 0. + /// \param[in] f_min Minimum frequency. Default: 0.0. + /// \param[in] f_max Maximum frequency. Default: 0.0. + /// \param[in] pad Two sided padding of signal. Default: 0. + /// \param[in] n_mels Number of mel filterbanks. Default: 128. + /// \param[in] window A function to create a window tensor that is applied/multiplied to each frame/window. + /// Default: WindowType::kHann. + /// \param[in] power Exponent for the magnitude spectrogram, (must be > 0) e.g., 1 for energy, 2 for power, etc. + /// Default: 2.0. + /// \param[in] normalized Whether to normalize by magnitude after stft. Default: false. + /// \param[in] center Whether to pad waveform on both sides. Default: true. + /// \param[in] pad_mode Controls the padding method used when center is True. Default: BorderType::kReflect. + /// \param[in] onesided Controls whether to return half of results to avoid redundancy. Default: true. + /// \param[in] norm_mel Norm to use. Default: NormType::kNone. + /// \param[in] mel_scale Scale to use: htk or slaney. Default: MelType::kHtk. 
+ explicit MFCC(int32_t sample_rate = 16000, int32_t n_mfcc = 40, int32_t dct_type = 2, + NormMode norm = NormMode::kOrtho, bool log_mels = false, int32_t n_fft = 400, int32_t win_length = 0, + int32_t hop_length = 0, float f_min = 0.0, float f_max = 0.0, int32_t pad = 0, int32_t n_mels = 128, + WindowType window = WindowType::kHann, float power = 2.0, bool normalized = false, bool center = true, + BorderType pad_mode = BorderType::kReflect, bool onesided = true, NormType norm_mel = NormType::kNone, + MelType mel_scale = MelType::kHtk); + + /// \brief Destructor. + ~MFCC() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief MuLawDecoding TensorTransform. +/// \note Decode mu-law encoded signal. +class DATASET_API MuLawDecoding final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] quantization_channels Number of channels, which must be positive. Default: 256. + explicit MuLawDecoding(int32_t quantization_channels = 256); + + /// \brief Destructor. + ~MuLawDecoding() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief MuLawEncoding TensorTransform. +/// \note Encode signal based on mu-law companding. +class DATASET_API MuLawEncoding final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] quantization_channels Number of channels, which must be positive. Default: 256. + explicit MuLawEncoding(int32_t quantization_channels = 256); + + /// \brief Destructor. + ~MuLawEncoding() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Overdrive TensorTransform. +class DATASET_API Overdrive final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] gain Coefficient of overload in dB, in range of [0, 100]. Default: 20.0. + /// \param[in] color Coefficient of translation, in range of [0, 100]. Default: 20.0. + explicit Overdrive(float gain = 20.0, float color = 20.0); + + /// \brief Destructor. + ~Overdrive() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Phaser TensorTransform. +class DATASET_API Phaser final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz). + /// \param[in] gain_in Desired input gain at the boost (or attenuation) in dB. + /// Allowed range of values is [0, 1]. Default: 0.4. + /// \param[in] gain_out Desired output gain at the boost (or attenuation) in dB. + /// Allowed range of values is [0, 1e9]. Default: 0.74. + /// \param[in] delay_ms Desired delay in milli seconds. Allowed range of values is [0, 5]. Default: 3.0. + /// \param[in] decay Desired decay relative to gain-in. 
Allowed range of values is [0, 0.99]. Default: 0.4. + /// \param[in] mod_speed Modulation speed in Hz. Allowed range of values is [0.1, 2]. Default: 0.5. + /// \param[in] sinusoidal If true, use sinusoidal modulation (preferable for multiple instruments). + /// If false, use triangular modulation (gives single instruments a sharper phasing effect). Default: true. + explicit Phaser(int32_t sample_rate, float gain_in = 0.4, float gain_out = 0.74, float delay_ms = 3.0, + float decay = 0.4, float mod_speed = 0.5, bool sinusoidal = true); + + /// \brief Destructor. + ~Phaser() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief PhaseVocoder TensorTransform +/// \notes Given a STFT tensor, speed up in time without modifying pitch by factor of rate. +class DATASET_API PhaseVocoder final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] rate Speed-up factor. + /// \param[in] phase_advance Expected phase advance in each bin in shape of (freq, 1). + PhaseVocoder(float rate, const MSTensor &phase_advance); + + /// \brief Destructor. + ~PhaseVocoder() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +// \brief Shift the pitch of a waveform by 'n_steps' steps. +class DATASET_API PitchShift final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sampling rate of "waveform". Default: 0. + /// \param[in] n_steps The (fractional) steps to shift "waveform". Default: 0. + /// \param[in] bins_per_octave The number of steps per octave. Default: 12. + /// \param[in] n_fft Size of FFT, creates "n_fft // 2 + 1" bins. Default: 512. + /// \param[in] win_length Window size. Default: 0, will be set to `n_fft` . + /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will be set to `win_length // 4` . + /// \param[in] window Window tensor that is applied/multiplied to each frame/window. Default: WindowType::kHann. + explicit PitchShift(int32_t sample_rate = 0, int32_t n_steps = 0, int32_t bins_per_octave = 12, int32_t n_fft = 512, + int32_t win_length = 0, int32_t hop_length = 0, WindowType window = WindowType::kHann); + + /// \brief Destructor. + ~PitchShift() override = default; + + protected: + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Resample TensorTransform. +/// \notes Resample a signal from one frequency to another. A sampling method can be given. +class DATASET_API Resample : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] orig_freq The original frequency of the signal, which must be positive. Default: 16000.0. + /// \param[in] new_freq The desired frequency, which must be positive. Default: 16000.0. + /// \param[in] resample_method The resampling method, which can be ResampleMethod::kSincInterpolation + /// and ResampleMethod::kKaiserWindow. Default: ResampleMethod::kSincInterpolation. + /// \param[in] lowpass_filter_width Controls the sharpness of the filter, more means sharper but less efficient, + /// which must be positive. Default: 6. 
+ /// \param[in] rolloff The roll-off frequency of the filter, as a fraction of the Nyquist. Lower values + /// reduce anti-aliasing, but also reduce some of the highest frequencies, range: (0, 1]. Default: 0.99. + /// \param[in] beta The shape parameter used for kaiser window. Default: 14.769656459379492. + explicit Resample(float orig_freq = 16000.0, float new_freq = 16000.0, + ResampleMethod resample_method = ResampleMethod::kSincInterpolation, + int32_t lowpass_filter_width = 6, float rolloff = 0.99, float beta = 14.769656459379492); + + /// \brief Destructor. + ~Resample() override = default; + + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Apply RIAA vinyl playback equalization. +class DATASET_API RiaaBiquad final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), + /// can only be one of 44100, 48000, 88200, 96000. + explicit RiaaBiquad(int32_t sample_rate); + + /// \brief Destructor. + ~RiaaBiquad() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Apply sliding-window cepstral mean (and optionally variance) normalization per utterance. +class DATASET_API SlidingWindowCmn final : public TensorTransform { + public: + /// \brief Constructor of SlidingWindowCmnOp. + /// \param[in] cmn_window The window in frames for running average CMN computation. Default: 600. + /// \param[in] min_cmn_window The minimum CMN window. Only applicable if center is false, ignored if center + /// is true. Default: 100. + /// \param[in] center If true, use a window centered on the current frame. If false, window is to the left. + /// Default: false. + /// \param[in] norm_vars If true, normalize variance to one. Default: false. + explicit SlidingWindowCmn(int32_t cmn_window = 600, int32_t min_cmn_window = 100, bool center = false, + bool norm_vars = false); + + /// \brief Destructor. + ~SlidingWindowCmn() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Create a spectral centroid from an audio signal. +class DATASET_API SpectralCentroid : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz). + /// \param[in] n_fft Size of FFT, creates n_fft / 2 + 1 bins. Default: 400. + /// \param[in] win_length Window size. Default: 0, will use n_fft. + /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will use win_length / 2. + /// \param[in] pad Two sided padding of signal. Default: 0. + /// \param[in] window Window function that is applied/multiplied to each frame/window, + /// which can be WindowType::kBartlett, WindowType::kBlackman, WindowType::kHamming, + /// WindowType::kHann or WindowType::kKaiser. Default: WindowType::kHann. 
+ explicit SpectralCentroid(int32_t sample_rate, int32_t n_fft = 400, int32_t win_length = 0, int32_t hop_length = 0, + int32_t pad = 0, WindowType window = WindowType::kHann); + + ~SpectralCentroid() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + int32_t sample_rate_; + int32_t n_fft_; + int32_t win_length_; + int32_t hop_length_; + int32_t pad_; + WindowType window_; + struct Data; + std::shared_ptr data_; +}; + +/// \brief Create a spectrogram from an audio signal. +class DATASET_API Spectrogram : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] n_fft Size of FFT, creates n_fft / 2 + 1 bins. Default: 400. + /// \param[in] win_length Window size. Default: 0, will use n_fft. + /// \param[in] hop_length Length of hop between STFT windows. Default: 0, will use win_length / 2. + /// \param[in] pad Two sided padding of signal. Default: 0. + /// \param[in] window Window function that is applied/multiplied to each frame/window, + /// which can be WindowType::kBartlett, WindowType::kBlackman, WindowType::kHamming, + /// WindowType::kHann or WindowType::kKaiser. Default: WindowType::kHann. + /// \param[in] power Exponent for the magnitude spectrogram, which must be greater than or equal to 0. Default: 2.0. + /// \param[in] normalized Whether to normalize by magnitude after stft. Default: false. + /// \param[in] center Whether to pad waveform on both sides. Default: true. + /// \param[in] pad_mode Controls the padding method used when center is true, + /// which can be BorderType::kReflect, BorderType::kConstant, BorderType::kEdge, + /// BorderType::kSymmetric. Default: BorderType::kReflect. + /// \param[in] onesided Controls whether to return half of results to avoid redundancy. Default: true. + explicit Spectrogram(int32_t n_fft = 400, int32_t win_length = 0, int32_t hop_length = 0, int32_t pad = 0, + WindowType window = WindowType::kHann, float power = 2.0, bool normalized = false, + bool center = true, BorderType pad_mode = BorderType::kReflect, bool onesided = true); + + /// \brief Destructor. + ~Spectrogram() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + int32_t n_fft_; + int32_t win_length_; + int32_t hop_length_; + int32_t pad_; + WindowType window_; + float power_; + bool normalized_; + bool center_; + BorderType pad_mode_; + bool onesided_; + struct Data; + std::shared_ptr data_; +}; + +/// \brief TimeMasking TensorTransform. +/// \notes Apply masking to a spectrogram in the time domain. +class DATASET_API TimeMasking final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] iid_masks Whether to apply different masks to each example. + /// \param[in] time_mask_param Maximum possible length of the mask, range: [0, time_length]. Default: 0. + /// Indices uniformly sampled from [0, time_mask_param]. + /// Mask width when iid_masks=true. + /// \param[in] mask_start Mask start when iid_masks=true, range: [0, time_length-time_mask_param]. Default: 0. + /// \param[in] mask_value Mask value. + explicit TimeMasking(bool iid_masks = false, int32_t time_mask_param = 0, int32_t mask_start = 0, + float mask_value = 0.0); + + /// \brief Destructor. 
+  ~TimeMasking() override = default;
+
+ protected:
+  /// \brief Function to convert TensorTransform object into a TensorOperation object.
+  /// \return Shared pointer to TensorOperation object.
+  std::shared_ptr<TensorOperation> Parse() override;
+
+ private:
+  struct Data;
+  std::shared_ptr<Data> data_;
+};
+
+/// \brief TimeStretch TensorTransform
+/// \notes Stretch STFT in time at a given rate, without changing the pitch.
+class DATASET_API TimeStretch final : public TensorTransform {
+ public:
+  /// \brief Constructor.
+  /// \param[in] hop_length Length of hop between STFT windows. Default: None, will use ((n_freq - 1) * 2) // 2.
+  /// \param[in] n_freq Number of filter banks from STFT. Default: 201.
+  /// \param[in] fixed_rate Rate to speed up or slow down the input in time.
+  /// Default: std::numeric_limits<float>::quiet_NaN(), will keep the original rate.
+  explicit TimeStretch(float hop_length = std::numeric_limits<float>::quiet_NaN(), int n_freq = 201,
+                       float fixed_rate = std::numeric_limits<float>::quiet_NaN());
+
+  /// \brief Destructor.
+  ~TimeStretch() override = default;
+
+ protected:
+  /// \brief Function to convert TensorTransform object into a TensorOperation object.
+  /// \return Shared pointer to TensorOperation object.
+  std::shared_ptr<TensorOperation> Parse() override;
+
+ private:
+  struct Data;
+  std::shared_ptr<Data> data_;
+};
+
+/// \brief Design a treble tone-control effect.
+class DATASET_API TrebleBiquad final : public TensorTransform {
+ public:
+  /// \brief Constructor.
+  /// \param[in] sample_rate Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be zero.
+  /// \param[in] gain Desired gain at the boost (or attenuation) in dB.
+  /// \param[in] central_freq Central frequency (in Hz). Default: 3000.0.
+  /// \param[in] Q Quality factor, https://en.wikipedia.org/wiki/Q_factor, range: (0, 1]. Default: 0.707.
+  TrebleBiquad(int32_t sample_rate, float gain, float central_freq = 3000.0, float Q = 0.707);
+
+  /// \brief Destructor.
+  ~TrebleBiquad() override = default;
+
+ protected:
+  /// \brief Function to convert TensorTransform object into a TensorOperation object.
+  /// \return Shared pointer to TensorOperation object.
+  std::shared_ptr<TensorOperation> Parse() override;
+
+ private:
+  struct Data;
+  std::shared_ptr<Data> data_;
+};
+
+/// \brief Vad TensorTransform.
+/// \notes Attempt to trim silent background sounds from the end of the voice recording.
+class DATASET_API Vad final : public TensorTransform {
+ public:
+  /// \brief Constructor.
+  /// \param[in] sample_rate Sample rate of audio signal.
+  /// \param[in] trigger_level The measurement level used to trigger activity detection. Default: 7.0.
+  /// \param[in] trigger_time The time constant (in seconds) used to help ignore short sounds. Default: 0.25.
+  /// \param[in] search_time The amount of audio (in seconds) to search for quieter/shorter sounds to include prior to
+  /// the detected trigger point. Default: 1.0.
+  /// \param[in] allowed_gap The allowed gap (in seconds) between quieter/shorter sounds to include prior to the
+  /// detected trigger point. Default: 0.25.
+  /// \param[in] pre_trigger_time The amount of audio (in seconds) to preserve before the trigger point and any found
+  /// quieter/shorter bursts. Default: 0.0.
+  /// \param[in] boot_time The time for the initial noise estimate. Default: 0.35.
+  /// \param[in] noise_up_time Time constant used by the adaptive noise estimator, when the noise level is increasing.
+  /// Default: 0.1.
+  /// \param[in] noise_down_time Time constant used by the adaptive noise estimator, when the noise level is decreasing.
+ /// Default: 0.01. + /// \param[in] noise_reduction_amount The amount of noise reduction used in the detection algorithm. Default: 1.35. + /// \param[in] measure_freq The frequency of the algorithm’s processing. Default: 20.0. + /// \param[in] measure_duration The duration of measurement. Default: 0, use twice the measurement period. + /// \param[in] measure_smooth_time The time constant used to smooth spectral measurements. Default: 0.4. + /// \param[in] hp_filter_freq The "Brick-wall" frequency of high-pass filter applied at the input to the detector + /// algorithm. Default: 50.0. + /// \param[in] lp_filter_freq The "Brick-wall" frequency of low-pass filter applied at the input to the detector + /// algorithm. Default: 6000.0. + /// \param[in] hp_lifter_freq The "Brick-wall" frequency of high-pass lifter applied at the input to the detector + /// algorithm. Default: 150.0. + /// \param[in] lp_lifter_freq The "Brick-wall" frequency of low-pass lifter applied at the input to the detector + /// algorithm. Default: 2000.0. + explicit Vad(int32_t sample_rate, float trigger_level = 7.0, float trigger_time = 0.25, float search_time = 1.0, + float allowed_gap = 0.25, float pre_trigger_time = 0.0, float boot_time = 0.35, + float noise_up_time = 0.1, float noise_down_time = 0.01, float noise_reduction_amount = 1.35, + float measure_freq = 20.0, float measure_duration = 0.0, float measure_smooth_time = 0.4, + float hp_filter_freq = 50.0, float lp_filter_freq = 6000.0, float hp_lifter_freq = 150.0, + float lp_lifter_freq = 2000.0); + + /// \brief Destructor. + ~Vad() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; + +/// \brief Vol TensorTransform. +/// \notes Add a volume to an waveform. +class DATASET_API Vol final : public TensorTransform { + public: + /// \brief Constructor. + /// \param[in] gain Gain value, varies according to the value of gain_type. If gain_type is GainType::kAmplitude, + /// gain must be greater than or equal to zero. If gain_type is GainType::kPower, gain must be greater than zero. + /// If gain_type is GainType::kDb, there is no limit for gain. + /// \param[in] gain_type Type of gain, should be one of [GainType::kAmplitude, GainType::kDb, GainType::kPower]. + explicit Vol(float gain, GainType gain_type = GainType::kAmplitude); + + /// \brief Destructor. + ~Vol() override = default; + + protected: + /// \brief Function to convert TensorTransform object into a TensorOperation object. + /// \return Shared pointer to TensorOperation object. + std::shared_ptr Parse() override; + + private: + struct Data; + std::shared_ptr data_; +}; +} // namespace audio +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_AUDIO_H_ diff --git a/mindspore-lite/minddata/dataset/include/dataset/config.h b/mindspore-lite/minddata/dataset/include/dataset/config.h index 47d12903..d3802e88 100644 --- a/mindspore-lite/minddata/dataset/include/dataset/config.h +++ b/mindspore-lite/minddata/dataset/include/dataset/config.h @@ -24,7 +24,7 @@ #include "include/api/dual_abi_helper.h" #include "include/api/visible.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Config operations for setting and getting the configuration. 
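
For readers of this patch, a minimal eager-processing sketch of how the audio transforms declared in the rewritten audio.h could be driven once everything lives under mindspore::lite. It is illustrative only and not part of the change: the Execute entry point is taken from the execute.h hunk further below, while the MSTensor constructor arguments and the sample values are assumptions made for the example.

// Minimal sketch, assuming the eager Execute API and the MSTensor value
// constructor behave as in the current include/api and include/dataset headers.
#include <memory>
#include <vector>
#include "include/api/types.h"
#include "include/dataset/audio.h"
#include "include/dataset/execute.h"

namespace ds = mindspore::lite::dataset;

int main() {
  // Placeholder mono waveform; real callers would decode audio from a file.
  std::vector<float> samples = {0.1f, -0.2f, 0.3f, -0.4f, 0.5f, -0.4f, 0.3f, -0.2f};
  mindspore::MSTensor waveform("waveform", mindspore::DataType::kNumberTypeFloat32,
                               {1, static_cast<int64_t>(samples.size())},
                               samples.data(), samples.size() * sizeof(float));

  // Chain two transforms declared above: a low-pass biquad, then a gain.
  auto lowpass = std::make_shared<ds::audio::LowpassBiquad>(44100, 3000.0f);
  auto gain = std::make_shared<ds::audio::Gain>(3.0f);
  std::vector<std::shared_ptr<ds::TensorTransform>> ops = {lowpass, gain};
  ds::Execute transform(ops);

  mindspore::MSTensor out;
  mindspore::Status rc = transform(waveform, &out);
  return rc.IsOk() ? 0 : 1;
}
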
namespace config { @@ -159,5 +159,5 @@ bool DATASET_API load(const std::vector &file); inline bool DATASET_API load(const std::string &file) { return load(StringToChar(file)); } } // namespace config } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_CONFIG_H diff --git a/mindspore-lite/minddata/dataset/include/dataset/constants.h b/mindspore-lite/minddata/dataset/include/dataset/constants.h index e2b760bb..8bbf2793 100644 --- a/mindspore-lite/minddata/dataset/include/dataset/constants.h +++ b/mindspore-lite/minddata/dataset/include/dataset/constants.h @@ -22,7 +22,7 @@ #include "include/api/visible.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Various type defines for convenience using uchar = unsigned char; @@ -362,5 +362,5 @@ using row_id_type = int64_t; constexpr uint32_t kCfgAutoTuneInterval = 0; // default number of steps } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_CONSTANTS_H_ diff --git a/mindspore-lite/minddata/dataset/include/dataset/data_helper.h b/mindspore-lite/minddata/dataset/include/dataset/data_helper.h index 138a06e0..2a5c4881 100644 --- a/mindspore-lite/minddata/dataset/include/dataset/data_helper.h +++ b/mindspore-lite/minddata/dataset/include/dataset/data_helper.h @@ -37,7 +37,7 @@ #include "include/api/dual_abi_helper.h" #include "include/api/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Simple class to do data manipulation, contains helper function to update json files in dataset class DATASET_API DataHelper { @@ -483,5 +483,5 @@ class DATASET_API DataHelper { Status RemoveKeyIF(const std::vector &in_file, const std::vector &key, const std::vector &out_file); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_DATA_HELPER_H_ diff --git a/mindspore-lite/minddata/dataset/include/dataset/datasets.h b/mindspore-lite/minddata/dataset/include/dataset/datasets.h index ad398173..598bafb3 100644 --- a/mindspore-lite/minddata/dataset/include/dataset/datasets.h +++ b/mindspore-lite/minddata/dataset/include/dataset/datasets.h @@ -36,7 +36,7 @@ #include "include/dataset/samplers.h" #include "include/dataset/text.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CsvBase; class DatasetCache; @@ -6534,5 +6534,5 @@ inline std::shared_ptr DATASET_API Zip(const std::vector(datasets); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_DATASETS_H_ diff --git a/mindspore-lite/minddata/dataset/include/dataset/execute.h b/mindspore-lite/minddata/dataset/include/dataset/execute.h index a2188807..66ef58b6 100644 --- a/mindspore-lite/minddata/dataset/include/dataset/execute.h +++ b/mindspore-lite/minddata/dataset/include/dataset/execute.h @@ -31,7 +31,7 @@ #include "runtime/hardware_abstract/device_context/device_context_manager.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DeviceResource; class Tensor; @@ -192,5 +192,5 @@ class PyExecute : public Execute { std::vector> *out); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_EXECUTE_H_ diff --git 
a/mindspore-lite/minddata/dataset/include/dataset/iterator.h b/mindspore-lite/minddata/dataset/include/dataset/iterator.h index 3dce9d91..9ede03f2 100644 --- a/mindspore-lite/minddata/dataset/include/dataset/iterator.h +++ b/mindspore-lite/minddata/dataset/include/dataset/iterator.h @@ -27,7 +27,7 @@ #include "include/api/status.h" #include "include/api/types.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declare class ExecutionTree; @@ -192,5 +192,5 @@ class DATASET_API PullIterator : public Iterator { std::unique_ptr pull_consumer_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_ITERATOR_H_ diff --git a/mindspore-lite/minddata/dataset/include/dataset/samplers.h b/mindspore-lite/minddata/dataset/include/dataset/samplers.h index d0061f97..4fae5744 100644 --- a/mindspore-lite/minddata/dataset/include/dataset/samplers.h +++ b/mindspore-lite/minddata/dataset/include/dataset/samplers.h @@ -24,7 +24,7 @@ #include "include/api/status.h" #include "include/dataset/constants.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declare class SamplerObj; @@ -342,5 +342,5 @@ class DATASET_API WeightedRandomSampler final : public Sampler { bool replacement_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_SAMPLERS_H_ diff --git a/mindspore-lite/minddata/dataset/include/dataset/text.h b/mindspore-lite/minddata/dataset/include/dataset/text.h index 244770b0..d0bb90dc 100644 --- a/mindspore-lite/minddata/dataset/include/dataset/text.h +++ b/mindspore-lite/minddata/dataset/include/dataset/text.h @@ -29,7 +29,7 @@ #include "include/dataset/constants.h" #include "include/dataset/transforms.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class TensorOperation; class Vectors; @@ -1088,5 +1088,5 @@ class DATASET_API WhitespaceTokenizer final : public TensorTransform { #endif } // namespace text } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_TEXT_H_ diff --git a/mindspore-lite/minddata/dataset/include/dataset/transforms.h b/mindspore-lite/minddata/dataset/include/dataset/transforms.h index 24242834..c52c37b6 100644 --- a/mindspore-lite/minddata/dataset/include/dataset/transforms.h +++ b/mindspore-lite/minddata/dataset/include/dataset/transforms.h @@ -27,7 +27,7 @@ #include "include/api/types.h" #include "include/dataset/constants.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class TensorOperation; @@ -634,5 +634,5 @@ class DATASET_API Unique final : public TensorTransform { }; } // namespace transforms } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_TRANSFORMS_H_ diff --git a/mindspore-lite/minddata/dataset/include/dataset/vision.h b/mindspore-lite/minddata/dataset/include/dataset/vision.h old mode 100755 new mode 100644 index 10e00e54..f2652e73 --- a/mindspore-lite/minddata/dataset/include/dataset/vision.h +++ b/mindspore-lite/minddata/dataset/include/dataset/vision.h @@ -30,7 +30,7 @@ #include "include/dataset/transforms.h" #include "include/dataset/vision_lite.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class TensorOperation; @@ -2125,5 +2125,5 @@ Status DATASET_API WriteJpeg(const 
std::string &filename, const mindspore::MSTen Status DATASET_API WritePng(const std::string &filename, const mindspore::MSTensor &image, int compression_level = 6); } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_VISION_H_ diff --git a/mindspore-lite/minddata/dataset/include/dataset/vision_ascend.h b/mindspore-lite/minddata/dataset/include/dataset/vision_ascend.h index 5093ad59..54e3f661 100644 --- a/mindspore-lite/minddata/dataset/include/dataset/vision_ascend.h +++ b/mindspore-lite/minddata/dataset/include/dataset/vision_ascend.h @@ -27,7 +27,7 @@ #include "include/dataset/constants.h" #include "include/dataset/transforms.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Transform operations for performing computer vision. namespace vision { @@ -202,5 +202,5 @@ class DATASET_API DvppDecodePng final : public TensorTransform { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_VISION_ASCEND_H_ diff --git a/mindspore-lite/minddata/dataset/include/dataset/vision_lite.h b/mindspore-lite/minddata/dataset/include/dataset/vision_lite.h index 1fafb68c..69230b98 100644 --- a/mindspore-lite/minddata/dataset/include/dataset/vision_lite.h +++ b/mindspore-lite/minddata/dataset/include/dataset/vision_lite.h @@ -27,7 +27,7 @@ #include "include/dataset/constants.h" #include "include/dataset/transforms.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Transform operations for performing computer vision. namespace vision { @@ -621,5 +621,5 @@ class DATASET_API SwapRedBlue final : public TensorTransform { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_VISION_LITE_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/c_func_op.cc b/mindspore-lite/minddata/dataset/kernels/c_func_op.cc index c025dc2b..9dc0be90 100644 --- a/mindspore-lite/minddata/dataset/kernels/c_func_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/c_func_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status CFuncOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); @@ -31,4 +31,4 @@ Status CFuncOp::Compute(const TensorRow &input, TensorRow *output) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/c_func_op.h b/mindspore-lite/minddata/dataset/kernels/c_func_op.h index 38484216..b94eb924 100644 --- a/mindspore-lite/minddata/dataset/kernels/c_func_op.h +++ b/mindspore-lite/minddata/dataset/kernels/c_func_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CFuncOp : public TensorOp { public: @@ -46,5 +46,5 @@ class CFuncOp : public TensorOp { std::function c_func_ptr_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_C_FUNC_OP_H_ diff --git 
a/mindspore-lite/minddata/dataset/kernels/data/compose_op.cc b/mindspore-lite/minddata/dataset/kernels/data/compose_op.cc index bb19aa4e..f0bdb1a2 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/compose_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/compose_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status ComposeOp::OutputShape(const std::vector &inputs, std::vector &outputs) { std::vector in_shapes = inputs; @@ -55,4 +55,4 @@ Status ComposeOp::Compute(const TensorRow &inputs, TensorRow *outputs) { ComposeOp::ComposeOp(const std::vector> &ops) : ops_(ops) {} } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/data/compose_op.h b/mindspore-lite/minddata/dataset/kernels/data/compose_op.h index 61d199c1..576afbd7 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/compose_op.h +++ b/mindspore-lite/minddata/dataset/kernels/data/compose_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ComposeOp : public TensorOp { public: @@ -102,5 +102,5 @@ class ComposeOp : public TensorOp { std::vector> ops_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_COMPOSE_OP_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/concatenate_op.cc b/mindspore-lite/minddata/dataset/kernels/data/concatenate_op.cc index 4eff0249..627a6523 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/concatenate_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/concatenate_op.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status ConcatenateOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); @@ -65,4 +65,4 @@ Status ConcatenateOp::OutputShape(const std::vector &inputs, std::v return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/data/concatenate_op.h b/mindspore-lite/minddata/dataset/kernels/data/concatenate_op.h index 2d440ad1..04ac1129 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/concatenate_op.h +++ b/mindspore-lite/minddata/dataset/kernels/data/concatenate_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ConcatenateOp : public TensorOp { public: @@ -59,5 +59,5 @@ class ConcatenateOp : public TensorOp { std::shared_ptr append_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CONCATENATE_OP_H diff --git a/mindspore-lite/minddata/dataset/kernels/data/data_utils.cc b/mindspore-lite/minddata/dataset/kernels/data/data_utils.cc index 58860e36..eb948d9b 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/data_utils.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/data_utils.cc @@ -32,7 +32,7 @@ #include 
"mindspore-lite/minddata/dataset/kernels/data/type_cast_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { template Status OneHotEncodingImpl(const std::shared_ptr &input, std::shared_ptr *output, dsize_t num_classes, @@ -897,4 +897,4 @@ Status Unique(const std::shared_ptr &input, std::shared_ptr *out return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/data/data_utils.h b/mindspore-lite/minddata/dataset/kernels/data/data_utils.h index 955cf13b..81fcd023 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/data_utils.h +++ b/mindspore-lite/minddata/dataset/kernels/data/data_utils.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor_row.h" #include "mindspore-lite/minddata/dataset/include/dataset/constants.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Returns Onehot encoding of the input tensor. // Example: if input=2 and numClasses=3, the output is [0 0 1]. @@ -191,5 +191,5 @@ Status UniqueHelper(const std::shared_ptr &input, std::shared_ptr &input, std::shared_ptr *output, std::shared_ptr *output_idx, std::shared_ptr *output_cnt); } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_DATA_UTILS_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/duplicate_op.cc b/mindspore-lite/minddata/dataset/kernels/data/duplicate_op.cc index c4f40f21..d0090648 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/duplicate_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/duplicate_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status DuplicateOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); @@ -33,4 +33,4 @@ Status DuplicateOp::Compute(const TensorRow &input, TensorRow *output) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/data/duplicate_op.h b/mindspore-lite/minddata/dataset/kernels/data/duplicate_op.h index 3fda07ab..6a2c9e60 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/duplicate_op.h +++ b/mindspore-lite/minddata/dataset/kernels/data/duplicate_op.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DuplicateOp : public TensorOp { public: @@ -38,5 +38,5 @@ class DuplicateOp : public TensorOp { std::string Name() const override { return kDuplicateOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_DUPLICATE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/fill_op.cc b/mindspore-lite/minddata/dataset/kernels/data/fill_op.cc index 3abd8d9f..69270bc9 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/fill_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/fill_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace 
mindspore::lite { namespace dataset { Status FillOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -27,4 +27,4 @@ Status FillOp::Compute(const std::shared_ptr &input, std::shared_ptr fill_value_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_FILL_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/mask_op.cc b/mindspore-lite/minddata/dataset/kernels/data/mask_op.cc index 2cd12f9c..888e7cc1 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/mask_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/mask_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status MaskOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -45,4 +45,4 @@ Status MaskOp::OutputType(const std::vector &inputs, std::vector cast_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_MASK_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/no_op.h b/mindspore-lite/minddata/dataset/kernels/data/no_op.h index bb5bebfc..84430ada 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/no_op.h +++ b/mindspore-lite/minddata/dataset/kernels/data/no_op.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class NoOp : public TensorOp { public: @@ -35,5 +35,5 @@ class NoOp : public TensorOp { std::string Name() const override { return kNoOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_NO_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/one_hot_op.cc b/mindspore-lite/minddata/dataset/kernels/data/one_hot_op.cc index 29aca51f..cfc1998c 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/one_hot_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/one_hot_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status OneHotOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -44,4 +44,4 @@ Status OneHotOp::OutputShape(const std::vector &inputs, std::vector return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/data/one_hot_op.h b/mindspore-lite/minddata/dataset/kernels/data/one_hot_op.h index 7b36dcd2..0310c05a 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/one_hot_op.h +++ b/mindspore-lite/minddata/dataset/kernels/data/one_hot_op.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class OneHotOp : public TensorOp { public: @@ -42,5 +42,5 @@ class OneHotOp : public TensorOp { double smoothing_rate_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // 
MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_ONE_HOT_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/pad_end_op.cc b/mindspore-lite/minddata/dataset/kernels/data/pad_end_op.cc index 4cc66b67..b402fe29 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/pad_end_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/pad_end_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status PadEndOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -36,4 +36,4 @@ Status PadEndOp::OutputShape(const std::vector &inputs, std::vector return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/data/pad_end_op.h b/mindspore-lite/minddata/dataset/kernels/data/pad_end_op.h index ba570cb9..a1890b4c 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/pad_end_op.h +++ b/mindspore-lite/minddata/dataset/kernels/data/pad_end_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class PadEndOp : public TensorOp { public: @@ -44,5 +44,5 @@ class PadEndOp : public TensorOp { std::shared_ptr pad_val_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_PAD_END_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/parse_example_op.cc b/mindspore-lite/minddata/dataset/kernels/data/parse_example_op.cc index 959179c6..5013597f 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/parse_example_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/parse_example_op.cc @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore::dataset { +namespace mindspore::lite::dataset { namespace protobuf = ::google::protobuf; constexpr bool kLittleEndian = __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__; @@ -1459,4 +1459,4 @@ void ParseExampleOp::CheckAndInitPool() { pool_ = std::make_unique(kThreadPoolSize); } } -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset diff --git a/mindspore-lite/minddata/dataset/kernels/data/parse_example_op.h b/mindspore-lite/minddata/dataset/kernels/data/parse_example_op.h index 2d31d263..56d2b244 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/parse_example_op.h +++ b/mindspore-lite/minddata/dataset/kernels/data/parse_example_op.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/engine/data_schema.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr int kThreadPoolSize = 32; @@ -72,5 +72,5 @@ class ParseExampleOp : public TensorOp { std::unordered_map column_name_id_map_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_PARSE_EXAMPLE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/random_apply_op.cc b/mindspore-lite/minddata/dataset/kernels/data/random_apply_op.cc index f1e55382..4274a253 100644 --- 
a/mindspore-lite/minddata/dataset/kernels/data/random_apply_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/random_apply_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { RandomApplyOp::RandomApplyOp(const std::vector> &ops, double prob) : prob_(prob), rand_double_(0.0, 1.0) { @@ -63,4 +63,4 @@ Status RandomApplyOp::Compute(const TensorRow &input, TensorRow *output) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/data/random_apply_op.h b/mindspore-lite/minddata/dataset/kernels/data/random_apply_op.h index b9bc0f5a..baa3de1b 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/random_apply_op.h +++ b/mindspore-lite/minddata/dataset/kernels/data/random_apply_op.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomApplyOp : public RandomTensorOp { public: @@ -73,5 +73,5 @@ class RandomApplyOp : public RandomTensorOp { std::uniform_real_distribution rand_double_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_RANDOM_APPLY_OP_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/random_choice_op.cc b/mindspore-lite/minddata/dataset/kernels/data/random_choice_op.cc index eccb9f9f..e090f1b3 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/random_choice_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/random_choice_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { RandomChoiceOp::RandomChoiceOp(const std::vector> &ops) : ops_(ops), rand_int_(0, ops.size() - 1) {} @@ -86,4 +86,4 @@ Status RandomChoiceOp::Compute(const TensorRow &input, TensorRow *output) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/data/random_choice_op.h b/mindspore-lite/minddata/dataset/kernels/data/random_choice_op.h index 67c55ef2..38c715ef 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/random_choice_op.h +++ b/mindspore-lite/minddata/dataset/kernels/data/random_choice_op.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/compose_op.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomChoiceOp : public RandomTensorOp { public: @@ -71,5 +71,5 @@ class RandomChoiceOp : public RandomTensorOp { std::uniform_int_distribution rand_int_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_RANDOM_CHOICE_OP_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/slice_op.cc b/mindspore-lite/minddata/dataset/kernels/data/slice_op.cc index 96db1563..4120a26f 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/slice_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/slice_op.cc @@ -19,11 +19,11 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include 
"mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status SliceOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); return input->Slice(output, slice_options_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/data/slice_op.h b/mindspore-lite/minddata/dataset/kernels/data/slice_op.h index 70097dde..19432343 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/slice_op.h +++ b/mindspore-lite/minddata/dataset/kernels/data/slice_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor_helpers.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SliceOp : public TensorOp { public: @@ -51,5 +51,5 @@ class SliceOp : public TensorOp { std::vector slice_options_ = {}; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_SLICE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/to_float16_op.cc b/mindspore-lite/minddata/dataset/kernels/data/to_float16_op.cc index 6543e757..2d153580 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/to_float16_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/to_float16_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status ToFloat16Op::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -33,4 +33,4 @@ Status ToFloat16Op::OutputType(const std::vector &inputs, std::vector< return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/data/to_float16_op.h b/mindspore-lite/minddata/dataset/kernels/data/to_float16_op.h index dd107d3f..de695192 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/to_float16_op.h +++ b/mindspore-lite/minddata/dataset/kernels/data/to_float16_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ToFloat16Op : public TensorOp { public: @@ -44,5 +44,5 @@ class ToFloat16Op : public TensorOp { std::string Name() const override { return kToFloat16Op; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_DATA_TO_FLOAT16_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/data/type_cast_op.cc b/mindspore-lite/minddata/dataset/kernels/data/type_cast_op.cc index 97bb5860..cd0779b0 100644 --- a/mindspore-lite/minddata/dataset/kernels/data/type_cast_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/data/type_cast_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { TypeCastOp::TypeCastOp(const DataType &new_type) : type_(new_type) {} @@ -37,4 +37,4 @@ Status TypeCastOp::OutputType(const std::vector &inputs, std::vector &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -27,4 
+27,4 @@ Status AdjustBrightnessOp::Compute(const std::shared_ptr &input, std::sh return AdjustBrightness(input, output, brightness_factor_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/adjust_brightness_op.h b/mindspore-lite/minddata/dataset/kernels/image/adjust_brightness_op.h index e02ebf3d..edf3de67 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/adjust_brightness_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/adjust_brightness_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class AdjustBrightnessOp : public TensorOp { public: @@ -41,5 +41,5 @@ class AdjustBrightnessOp : public TensorOp { float brightness_factor_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_ADJUST_BRIGHTNESS_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/adjust_contrast_op.cc b/mindspore-lite/minddata/dataset/kernels/image/adjust_contrast_op.cc index 19712d28..4c77e524 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/adjust_contrast_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/adjust_contrast_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status AdjustContrastOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -27,4 +27,4 @@ Status AdjustContrastOp::Compute(const std::shared_ptr &input, std::shar return AdjustContrast(input, output, contrast_factor_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/adjust_contrast_op.h b/mindspore-lite/minddata/dataset/kernels/image/adjust_contrast_op.h index 8ca54b6a..5c95b077 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/adjust_contrast_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/adjust_contrast_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class AdjustContrastOp : public TensorOp { public: @@ -41,5 +41,5 @@ class AdjustContrastOp : public TensorOp { float contrast_factor_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_CONTRAST_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/adjust_gamma_op.cc b/mindspore-lite/minddata/dataset/kernels/image/adjust_gamma_op.cc index 6a0f597b..3a2dbf2c 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/adjust_gamma_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/adjust_gamma_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr float AdjustGammaOp::kGain = 1.0; @@ -40,4 +40,4 @@ Status AdjustGammaOp::Compute(const std::shared_ptr &input, std::shared_ } } } // namespace dataset -} // namespace mindspore +} // 
namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/adjust_gamma_op.h b/mindspore-lite/minddata/dataset/kernels/image/adjust_gamma_op.h index 9c33d946..b35c7faf 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/adjust_gamma_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/adjust_gamma_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class AdjustGammaOp : public TensorOp { public: @@ -50,5 +50,5 @@ class AdjustGammaOp : public TensorOp { float gain_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_ADJUST_GAMMA_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/adjust_hue_op.cc b/mindspore-lite/minddata/dataset/kernels/image/adjust_hue_op.cc index 8f3484b2..0e10aea0 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/adjust_hue_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/adjust_hue_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status AdjustHueOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -27,4 +27,4 @@ Status AdjustHueOp::Compute(const std::shared_ptr &input, std::shared_pt return AdjustHue(input, output, hue_factor_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/adjust_hue_op.h b/mindspore-lite/minddata/dataset/kernels/image/adjust_hue_op.h index 1148011a..e026fd21 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/adjust_hue_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/adjust_hue_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class AdjustHueOp : public TensorOp { public: @@ -41,5 +41,5 @@ class AdjustHueOp : public TensorOp { float hue_factor_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_ADJUST_HUE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/adjust_saturation_op.cc b/mindspore-lite/minddata/dataset/kernels/image/adjust_saturation_op.cc index 20e9eb39..7674dc1d 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/adjust_saturation_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/adjust_saturation_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status AdjustSaturationOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -27,4 +27,4 @@ Status AdjustSaturationOp::Compute(const std::shared_ptr &input, std::sh return AdjustSaturation(input, output, saturation_factor_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/adjust_saturation_op.h 
b/mindspore-lite/minddata/dataset/kernels/image/adjust_saturation_op.h index 26a1e79d..6b62bf17 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/adjust_saturation_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/adjust_saturation_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class AdjustSaturationOp : public TensorOp { public: @@ -41,5 +41,5 @@ class AdjustSaturationOp : public TensorOp { float saturation_factor_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_ADJUST_SATURATION_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/affine_op.cc b/mindspore-lite/minddata/dataset/kernels/image/affine_op.cc index d2b4704b..99740a38 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/affine_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/affine_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/math_utils.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const InterpolationMode AffineOp::kDefInterpolation = InterpolationMode::kNearestNeighbour; const float_t AffineOp::kDegrees = 0.0; @@ -60,4 +60,4 @@ Status AffineOp::Compute(const std::shared_ptr &input, std::shared_ptr fill_value_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_AFFINE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/auto_augment_op.cc b/mindspore-lite/minddata/dataset/kernels/image/auto_augment_op.cc index fdbb89fb..a898d94e 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/auto_augment_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/auto_augment_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { AutoAugmentOp::AutoAugmentOp(AutoAugmentPolicy policy, InterpolationMode interpolation, const std::vector &fill_value) @@ -152,4 +152,4 @@ Space AutoAugmentOp::GetSpace(int32_t num_bins, const std::vector &imag return space; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/auto_augment_op.h b/mindspore-lite/minddata/dataset/kernels/image/auto_augment_op.h index 13290954..fa8aa519 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/auto_augment_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/auto_augment_op.h @@ -36,7 +36,7 @@ typedef std::vector>> Transforms; typedef std::map, bool>> Space; -namespace mindspore { +namespace mindspore::lite { namespace dataset { class AutoAugmentOp : public RandomTensorOp { public: @@ -62,5 +62,5 @@ class AutoAugmentOp : public RandomTensorOp { Transforms transforms_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_AUTO_AUGMENT_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/auto_contrast_op.cc b/mindspore-lite/minddata/dataset/kernels/image/auto_contrast_op.cc index 240cd271..670c9e35 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/auto_contrast_op.cc +++ 
b/mindspore-lite/minddata/dataset/kernels/image/auto_contrast_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const float AutoContrastOp::kCutOff = 0.0; const std::vector AutoContrastOp::kIgnore = {}; @@ -28,4 +28,4 @@ Status AutoContrastOp::Compute(const std::shared_ptr &input, std::shared return AutoContrast(input, output, cutoff_, ignore_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/auto_contrast_op.h b/mindspore-lite/minddata/dataset/kernels/image/auto_contrast_op.h index 63256658..5a793872 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/auto_contrast_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/auto_contrast_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class AutoContrastOp : public TensorOp { public: @@ -54,5 +54,5 @@ class AutoContrastOp : public TensorOp { std::vector ignore_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_AUTO_CONTRAST_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/bounding_box.cc b/mindspore-lite/minddata/dataset/kernels/image/bounding_box.cc index 133df531..9b4e39bf 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/bounding_box.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/bounding_box.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const uint8_t kNumOfCols = 4; const float kEpsilon = 1e-4; @@ -231,4 +231,4 @@ Status BoundingBox::UpdateBBoxesForResize(const TensorPtr &bbox_list, size_t bbo return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/bounding_box.h b/mindspore-lite/minddata/dataset/kernels/image/bounding_box.h index 06677f68..e581fde2 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/bounding_box.h +++ b/mindspore-lite/minddata/dataset/kernels/image/bounding_box.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor_row.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class BoundingBox { public: @@ -133,5 +133,5 @@ class BoundingBox { bbox_float height_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_BOUNDING_BOX_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/bounding_box_augment_op.cc b/mindspore-lite/minddata/dataset/kernels/image/bounding_box_augment_op.cc index 9ff49b66..7985b240 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/bounding_box_augment_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/bounding_box_augment_op.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/kernels/image/resize_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const float BoundingBoxAugmentOp::kDefRatio = 0.3; @@ -74,4 +74,4 @@ Status BoundingBoxAugmentOp::Compute(const 
TensorRow &input, TensorRow *output) return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/bounding_box_augment_op.h b/mindspore-lite/minddata/dataset/kernels/image/bounding_box_augment_op.h index cecb3e92..923e45ca 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/bounding_box_augment_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/bounding_box_augment_op.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class BoundingBoxAugmentOp : public RandomTensorOp { public: @@ -59,5 +59,5 @@ class BoundingBoxAugmentOp : public RandomTensorOp { std::shared_ptr transform_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_BOUNDING_BOX_AUGMENT_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/center_crop_op.cc b/mindspore-lite/minddata/dataset/kernels/image/center_crop_op.cc index 9fe70871..e2514d6b 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/center_crop_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/center_crop_op.cc @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const int32_t CenterCropOp::kDefWidth = 0; @@ -140,4 +140,4 @@ Status CenterCropOp::OutputShape(const std::vector &inputs, std::ve return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/center_crop_op.h b/mindspore-lite/minddata/dataset/kernels/image/center_crop_op.h index 6cf77a2a..b9ff6834 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/center_crop_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/center_crop_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CenterCropOp : public TensorOp { public: @@ -52,5 +52,5 @@ class CenterCropOp : public TensorOp { int32_t crop_wid_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_CENTER_CROP_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/convert_color_op.cc b/mindspore-lite/minddata/dataset/kernels/image/convert_color_op.cc index 3a1c9963..b54c4ffb 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/convert_color_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/convert_color_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { ConvertColorOp::ConvertColorOp(ConvertMode convert_mode) : convert_mode_(convert_mode) {} @@ -30,4 +30,4 @@ Status ConvertColorOp::Compute(const std::shared_ptr &input, std::shared return ConvertColor(input, output, convert_mode_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/convert_color_op.h b/mindspore-lite/minddata/dataset/kernels/image/convert_color_op.h index 
0ecd7c2b..750044d3 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/convert_color_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/convert_color_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ConvertColorOp : public TensorOp { public: @@ -41,5 +41,5 @@ class ConvertColorOp : public TensorOp { ConvertMode convert_mode_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_CONVERT_COLOR_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/crop_op.cc b/mindspore-lite/minddata/dataset/kernels/image/crop_op.cc index 13a47efa..e3162143 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/crop_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/crop_op.cc @@ -22,7 +22,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status CropOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -53,4 +53,4 @@ Status CropOp::OutputShape(const std::vector &inputs, std::vector &input, std::shared_ptr &inputs, std::vector &inputs, std::vecto return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/decode_video_op.h b/mindspore-lite/minddata/dataset/kernels/image/decode_video_op.h index d4994479..ea8941de 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/decode_video_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/decode_video_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DecodeVideoOp : public TensorOp { public: @@ -41,5 +41,5 @@ class DecodeVideoOp : public TensorOp { std::string Name() const override { return kDecodeVideoOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DECODE_VIDEO_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/acl_adapter.cc b/mindspore-lite/minddata/dataset/kernels/image/dvpp/acl_adapter.cc index c9c3d526..e12f7f33 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/acl_adapter.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/acl_adapter.cc @@ -23,7 +23,7 @@ #include "utils/ms_context.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace { #if defined(BUILD_LITE) @@ -757,4 +757,4 @@ APP_ERROR AclAdapter::DestroyIntArray(void *int_array) { } #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/acl_adapter.h b/mindspore-lite/minddata/dataset/kernels/image/dvpp/acl_adapter.h index f8a51dfe..97801d03 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/acl_adapter.h +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/acl_adapter.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/AclLiteError.h" #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/ErrorCode.h" -namespace mindspore::dataset { +namespace mindspore::lite::dataset { class 
AclAdapter { public: static AclAdapter &GetInstance(); @@ -263,5 +263,5 @@ class AclAdapter { DestroyIntArrayFunObj destroy_int_array_fun_obj_; #endif }; -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_ACL_ADAPTER_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_crop_jpeg_op.cc b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_crop_jpeg_op.cc index ffef87fd..8f164239 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_crop_jpeg_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_crop_jpeg_op.cc @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status DvppCropJpegOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -159,4 +159,4 @@ Status DvppCropJpegOp::SetAscendResource(const std::shared_ptr & return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_crop_jpeg_op.h b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_crop_jpeg_op.h index d976b24a..0140cca8 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_crop_jpeg_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_crop_jpeg_op.h @@ -31,7 +31,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DvppCropJpegOp : public TensorOp { public: @@ -56,5 +56,5 @@ class DvppCropJpegOp : public TensorOp { std::shared_ptr processor_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_CROP_JPEG_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_jpeg_op.cc b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_jpeg_op.cc index 0208ae2b..a89a9805 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_jpeg_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_jpeg_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Compute() will be called when context=="Ascend310" Status DvppDecodeJpegOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { @@ -145,4 +145,4 @@ Status DvppDecodeJpegOp::OutputShape(const std::vector &inputs, std return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_jpeg_op.h b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_jpeg_op.h index 35135eca..1163f65a 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_jpeg_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_jpeg_op.h @@ -30,7 +30,7 @@ #include 
"mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DvppDecodeJpegOp : public TensorOp { public: @@ -53,5 +53,5 @@ class DvppDecodeJpegOp : public TensorOp { std::shared_ptr processor_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_JPEG_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_png_op.cc b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_png_op.cc index b60d982b..04f98952 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_png_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_png_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status DvppDecodePngOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -140,4 +140,4 @@ Status DvppDecodePngOp::SetAscendResource(const std::shared_ptr return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_png_op.h b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_png_op.h index f329f87d..549f87cc 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_png_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_png_op.h @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DvppDecodePngOp : public TensorOp { public: @@ -53,5 +53,5 @@ class DvppDecodePngOp : public TensorOp { std::shared_ptr processor_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_PNG_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_resize_crop_jpeg_op.cc b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_resize_crop_jpeg_op.cc index 0ace3a58..7a6f475f 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_resize_crop_jpeg_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_resize_crop_jpeg_op.cc @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status DvppDecodeResizeCropJpegOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { @@ -148,4 +148,4 @@ Status DvppDecodeResizeCropJpegOp::SetAscendResource(const std::shared_ptr processor_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_RESIZE_CROP_JPEG_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_resize_jpeg_op.cc 
b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_resize_jpeg_op.cc index a85eb8d6..d22ecb35 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_resize_jpeg_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_resize_jpeg_op.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status DvppDecodeResizeJpegOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { @@ -141,4 +141,4 @@ Status DvppDecodeResizeJpegOp::SetAscendResource(const std::shared_ptr processor_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_RESIZE_JPEG_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_video_op.cc b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_video_op.cc index 46b3031a..917c0486 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_video_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_video_op.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/acl_adapter.h" #include "mindspore-lite/minddata/dataset/util/path.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const VdecOutputFormat DvppDecodeVideoOp::kDefVdecOutputFormat = VdecOutputFormat::kYuvSemiplanar420; const char DvppDecodeVideoOp::kDefOutput[] = "./output"; @@ -87,4 +87,4 @@ Status DvppDecodeVideoOp::OutputShape(const std::vector &inputs, st return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_video_op.h b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_video_op.h index 9e240388..ebe314ed 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_video_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_decode_video_op.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DvppDecodeVideoOp : public TensorOp { public: @@ -68,5 +68,5 @@ class DvppDecodeVideoOp : public TensorOp { std::string output_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_DECODE_VIDEO_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_normalize_op.cc b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_normalize_op.cc index c66f4fdd..78ff72a5 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_normalize_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_normalize_op.cc @@ -18,7 +18,7 @@ #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status DvppNormalizeOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { const TensorShape dvpp_shape({1, 1, 1}); @@ -38,4 +38,4 @@ Status DvppNormalizeOp::Compute(const std::shared_ptr &input, std: Status DvppNormalizeOp::SetAscendResource(const 
std::shared_ptr &resource) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_normalize_op.h b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_normalize_op.h index 00544d7a..0090b6fa 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_normalize_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_normalize_op.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DvppNormalizeOp : public TensorOp { public: @@ -47,5 +47,5 @@ class DvppNormalizeOp : public TensorOp { std::vector std_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_NORMALIZE_JPEG_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_resize_jpeg_op.cc b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_resize_jpeg_op.cc index 8b288abe..a05b1b35 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_resize_jpeg_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_resize_jpeg_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/CommonDataType.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status DvppResizeJpegOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -159,4 +159,4 @@ Status DvppResizeJpegOp::OutputShape(const std::vector &inputs, std return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_resize_jpeg_op.h b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_resize_jpeg_op.h index 1e6b98dc..2211e1fc 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_resize_jpeg_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend310/dvpp_resize_jpeg_op.h @@ -31,7 +31,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class DvppResizeJpegOp : public TensorOp { public: @@ -57,5 +57,5 @@ class DvppResizeJpegOp : public TensorOp { std::shared_ptr processor_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_DVPP_RESIZE_JPEG_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/acl_env_guard.cc b/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/acl_env_guard.cc index 10c3c393..f02e5132 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/acl_env_guard.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/acl_env_guard.cc @@ -19,7 +19,7 @@ #include "plugin/ascend/res_manager/symbol_interface/acl_symbol.h" #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" -namespace mindspore { +namespace mindspore::lite { std::shared_ptr AclEnvGuard::global_acl_env_ = nullptr; std::mutex AclEnvGuard::global_acl_env_mutex_; @@ -91,4 
+91,4 @@ std::shared_ptr AclEnvGuard::GetAclEnv() { } return acl_env; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/acl_env_guard.h b/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/acl_env_guard.h index 66bd7c87..e2291898 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/acl_env_guard.h +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/acl_env_guard.h @@ -20,7 +20,7 @@ #include #include "acl/acl_base.h" -namespace mindspore { +namespace mindspore::lite { class __attribute__((visibility("default"))) AclInitAdapter { public: static AclInitAdapter &GetInstance(); @@ -49,5 +49,5 @@ class __attribute__((visibility("default"))) AclEnvGuard { aclError errno_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_ACL_ACL_ENV_GUARD_H diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/dvpp_image_utils.cc b/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/dvpp_image_utils.cc index adaf1891..35c8ce4b 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/dvpp_image_utils.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/dvpp_image_utils.cc @@ -70,7 +70,7 @@ #include "acldvppop/acldvpp_warp_perspective.h" #include "plugin/ascend/res_manager/stream_manager/ascend_stream_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { APP_ERROR DvppAdjustBrightness(const std::shared_ptr &input, std::shared_ptr *output, float factor) { @@ -2837,4 +2837,4 @@ APP_ERROR DestroyIntArray(void *int_array) { return APP_ERR_OK; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/dvpp_image_utils.h b/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/dvpp_image_utils.h index 7e242228..3264cc71 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/dvpp_image_utils.h +++ b/mindspore-lite/minddata/dataset/kernels/image/dvpp/utils/dvpp_image_utils.h @@ -40,7 +40,7 @@ #include "acldvppop/acldvpp_base.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const int kInvalidInterpolationMode = 100; const int kInvalidPaddingMode = 101; @@ -352,5 +352,5 @@ APP_ERROR DestroyFloatArray(void *float_array); APP_ERROR DestroyIntArray(void *int_array); } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_DVPP_UTILS_DVPP_IMAGE_UTILS_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/equalize_op.cc b/mindspore-lite/minddata/dataset/kernels/image/equalize_op.cc index 723bab0f..040f21a3 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/equalize_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/equalize_op.cc @@ -17,7 +17,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // only supports RGB images Status EqualizeOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { @@ -25,4 +25,4 @@ Status EqualizeOp::Compute(const std::shared_ptr &input, std::shared_ptr return Equalize(input, output); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/equalize_op.h b/mindspore-lite/minddata/dataset/kernels/image/equalize_op.h index 5802bb4c..56167957 
100644 --- a/mindspore-lite/minddata/dataset/kernels/image/equalize_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/equalize_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class EqualizeOp : public TensorOp { public: @@ -37,5 +37,5 @@ class EqualizeOp : public TensorOp { std::string Name() const override { return kEqualizeOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_EQUALIZE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/erase_op.cc b/mindspore-lite/minddata/dataset/kernels/image/erase_op.cc index cf7755a6..03323089 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/erase_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/erase_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // constructor EraseOp::EraseOp(int32_t top, int32_t left, int32_t height, int32_t width, const std::vector &value, @@ -40,4 +40,4 @@ Status EraseOp::Compute(const std::shared_ptr &input, std::shared_ptr T parse_bytes(const uint8_t *buf, bool intel_align); @@ -158,4 +158,4 @@ int ExifInfo::parseOrientation(const unsigned char *data, unsigned len) { return parseExif(data + offset, len - offset); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/exif_utils.h b/mindspore-lite/minddata/dataset/kernels/image/exif_utils.h index 7a358475..ced28b9f 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/exif_utils.h +++ b/mindspore-lite/minddata/dataset/kernels/image/exif_utils.h @@ -17,7 +17,7 @@ #ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_EXIF_H_ #define MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_EXIF_H_ -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ExifInfo { @@ -25,5 +25,5 @@ class ExifInfo { int parseOrientation(const unsigned char *data, unsigned len); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_EXIF_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/gaussian_blur_op.cc b/mindspore-lite/minddata/dataset/kernels/image/gaussian_blur_op.cc index 0d37747d..62a612a0 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/gaussian_blur_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/gaussian_blur_op.cc @@ -22,7 +22,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status GaussianBlurOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -30,4 +30,4 @@ Status GaussianBlurOp::Compute(const std::shared_ptr &input, std::shared return GaussianBlur(input, output, kernel_x_, kernel_y_, sigma_x_, sigma_y_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/gaussian_blur_op.h b/mindspore-lite/minddata/dataset/kernels/image/gaussian_blur_op.h index 01ba7600..cf96ff6b 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/gaussian_blur_op.h +++ 
b/mindspore-lite/minddata/dataset/kernels/image/gaussian_blur_op.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class GaussianBlurOp : public TensorOp { public: @@ -59,5 +59,5 @@ class GaussianBlurOp : public TensorOp { float sigma_y_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_GAUSSIAN_BLUR_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/horizontal_flip_op.cc b/mindspore-lite/minddata/dataset/kernels/image/horizontal_flip_op.cc index 784a37dc..33616614 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/horizontal_flip_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/horizontal_flip_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status HorizontalFlipOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -52,4 +52,4 @@ Status HorizontalFlipOp::Compute(const std::shared_ptr &input, std::shar return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/horizontal_flip_op.h b/mindspore-lite/minddata/dataset/kernels/image/horizontal_flip_op.h index e34e0bee..622242a3 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/horizontal_flip_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/horizontal_flip_op.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class HorizontalFlipOp : public TensorOp { public: @@ -36,5 +36,5 @@ class HorizontalFlipOp : public TensorOp { std::string Name() const override { return kHorizontalFlipOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_HORIZONTAL_FLIP_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/hwc_to_chw_op.cc b/mindspore-lite/minddata/dataset/kernels/image/hwc_to_chw_op.cc index a9e32990..c357abab 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/hwc_to_chw_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/hwc_to_chw_op.cc @@ -22,7 +22,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status HwcToChwOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -44,4 +44,4 @@ Status HwcToChwOp::OutputShape(const std::vector &inputs, std::vect return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/hwc_to_chw_op.h b/mindspore-lite/minddata/dataset/kernels/image/hwc_to_chw_op.h index 9bfedf1a..0869973c 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/hwc_to_chw_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/hwc_to_chw_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace 
mindspore::lite { namespace dataset { class HwcToChwOp : public TensorOp { public: @@ -35,5 +35,5 @@ class HwcToChwOp : public TensorOp { std::string Name() const override { return kHwcToChwOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_HWC_TO_CHW_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/image_utils.cc b/mindspore-lite/minddata/dataset/kernels/image/image_utils.cc index f8a76f90..a72d271e 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/image_utils.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/image_utils.cc @@ -45,7 +45,7 @@ const int32_t DOUBLING_FACTOR = 2; // used as multiplier with MAX_INT_ const int32_t DEFAULT_NUM_HEIGHT = 1; const int32_t DEFAULT_NUM_WIDTH = 1; -namespace mindspore { +namespace mindspore::lite { namespace dataset { int GetCVInterpolationMode(InterpolationMode mode) { switch (mode) { @@ -2864,4 +2864,4 @@ Status CheckUnsupportedImage(const std::shared_ptr &image) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/image_utils.h b/mindspore-lite/minddata/dataset/kernels/image/image_utils.h old mode 100755 new mode 100644 index 664c3594..c44fe582 --- a/mindspore-lite/minddata/dataset/kernels/image/image_utils.h +++ b/mindspore-lite/minddata/dataset/kernels/image/image_utils.h @@ -41,7 +41,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr dsize_t kChannelIndexHWC = 2; // images are hwc, so index 2 represents number of channels constexpr dsize_t kChannelIndexCHW = 0; // images are chw, so index 0 represents number of channels @@ -597,5 +597,5 @@ Status DumpImageAndAppendStatus(const std::shared_ptr &image, const Stat /// \return Status code. 
Status CheckUnsupportedImage(const std::shared_ptr &image); } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_IMAGE_UTILS_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/invert_op.cc b/mindspore-lite/minddata/dataset/kernels/image/invert_op.cc index e59c819c..ce25b902 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/invert_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/invert_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // only supports RGB images Status InvertOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { @@ -32,4 +32,4 @@ Status InvertOp::Compute(const std::shared_ptr &input, std::shared_ptr buffer(ksize + 1); @@ -281,4 +281,4 @@ bool Canny(const LiteMat &src, LiteMat &dst, double low_thresh, double high_thre return true; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/lite_cv/gaussian_blur.cc b/mindspore-lite/minddata/dataset/kernels/image/lite_cv/gaussian_blur.cc index b7ee90c3..d9f5c2c9 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/lite_cv/gaussian_blur.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/lite_cv/gaussian_blur.cc @@ -26,7 +26,7 @@ #endif #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { static void GetGaussianKernel(float *kernel, int size, double sigma) { int n = (size - 1) / 2; @@ -86,4 +86,4 @@ bool GaussianBlur(const LiteMat &src, LiteMat &dst, const std::vector &ksiz return ConvRowCol(src, kx, ky, dst, src.data_type_, pad_type); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/lite_cv/image_process.cc b/mindspore-lite/minddata/dataset/kernels/image/lite_cv/image_process.cc index b0298d50..74c2ab51 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/lite_cv/image_process.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/lite_cv/image_process.cc @@ -29,7 +29,7 @@ #include #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr uint32_t kR2Gray = 9798; constexpr uint32_t kG2Gray = 19235; @@ -2157,4 +2157,4 @@ bool HWC2CHW(LiteMat &src, LiteMat &dst) { return true; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/lite_cv/image_process.h b/mindspore-lite/minddata/dataset/kernels/image/lite_cv/image_process.h index 034bbe75..fda82bc9 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/lite_cv/image_process.h +++ b/mindspore-lite/minddata/dataset/kernels/image/lite_cv/image_process.h @@ -25,7 +25,7 @@ #include "lite_cv/lite_mat.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { #define CV_PI 3.1415926535897932384626433832795 #define IM_TOOL_EXIF_ORIENTATION_0_DEG 1 @@ -653,5 +653,5 @@ bool DATASET_API ResizePreserveARWithFiller(LiteMat &src, LiteMat &dst, int h, i /// \return Return true if transform successfully. 
bool DATASET_API HWC2CHW(LiteMat &src, LiteMat &dst); } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // IMAGE_PROCESS_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/lite_cv/lite_mat.cc b/mindspore-lite/minddata/dataset/kernels/image/lite_cv/lite_mat.cc index afb12019..3fb71dd3 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/lite_cv/lite_mat.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/lite_cv/lite_mat.cc @@ -23,7 +23,7 @@ #include #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { LiteMat::LiteMat() { data_ptr_ = nullptr; @@ -746,4 +746,4 @@ bool Multiply(const LiteMat &src_a, const LiteMat &src_b, LiteMat *dst) { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/lite_cv/lite_mat.h b/mindspore-lite/minddata/dataset/kernels/image/lite_cv/lite_mat.h index 782d9bfb..97decc32 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/lite_cv/lite_mat.h +++ b/mindspore-lite/minddata/dataset/kernels/image/lite_cv/lite_mat.h @@ -22,7 +22,7 @@ #include "include/api/visible.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr int kAlign = 16; constexpr size_t kMaxDims = 3; @@ -477,5 +477,5 @@ bool DATASET_API Multiply(const LiteMat &src_a, const LiteMat &src_b, LiteMat *d } while (false) } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINI_MAT_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/lite_cv/warp_affine.cc b/mindspore-lite/minddata/dataset/kernels/image/lite_cv/warp_affine.cc index 95d52e33..afd4509c 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/lite_cv/warp_affine.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/lite_cv/warp_affine.cc @@ -26,7 +26,7 @@ constexpr int kTabSz = 1 << kBits; constexpr int kTabSz2 = kTabSz * kTabSz; constexpr int kRemapScale = 1 << 15; -namespace mindspore { +namespace mindspore::lite { namespace dataset { static int16_t BWBlock_i[kTabSz2][2][2]; @@ -575,4 +575,4 @@ bool WarpPerspectiveBilinear(const LiteMat &src, LiteMat &dst, const LiteMat &M, } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.cc b/mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.cc index dd6b0ac1..6b629d7d 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.cc @@ -44,7 +44,7 @@ constexpr int64_t hw_shape = 2; constexpr int64_t hwc_rank = 3; #define MAX_INT_PRECISION 16777216 // float int precision is 16777216 -namespace mindspore { +namespace mindspore::lite { namespace dataset { #if defined(ENABLE_CLOUD_FUSION_INFERENCE) bool IsNonEmptyPNG(const std::shared_ptr &input) { @@ -1216,4 +1216,4 @@ Status HwcToChw(const std::shared_ptr &input, std::shared_ptr *o return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.h b/mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.h index 238d0c3f..7f122d43 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.h +++ b/mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.h @@ -38,7 +38,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include 
"mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr dsize_t kChannelIndexHWC = 2; // images are hwc, so index 2 represents number of channels constexpr dsize_t kChannelIndexCHW = 0; // images are chw, so index 0 represents number of channels @@ -235,5 +235,5 @@ Status ValidateImageRank(const std::string &op_name, int32_t rank); /// \param output: Tensor of shape or and same input type. Status HwcToChw(const std::shared_ptr &input, std::shared_ptr *output); } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_LITE_IMAGE_UTILS_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/math_utils.cc b/mindspore-lite/minddata/dataset/kernels/image/math_utils.cc index a944dab1..4cb46800 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/math_utils.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/math_utils.cc @@ -20,7 +20,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status ComputeUpperAndLowerPercentiles(std::vector *hist, int32_t hi_p, int32_t low_p, int32_t *hi, int32_t *lo) { @@ -84,4 +84,4 @@ Status GenerateRealNumber(float_t a, float_t b, std::mt19937 *rnd, float_t *resu return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/math_utils.h b/mindspore-lite/minddata/dataset/kernels/image/math_utils.h index 116cf112..8f564f41 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/math_utils.h +++ b/mindspore-lite/minddata/dataset/kernels/image/math_utils.h @@ -24,7 +24,7 @@ #define CV_PI 3.1415926535897932384626433832795 -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Returns lower and upper pth percentiles of the input histogram. 
/// \param[in] hist: Input histogram (mutates the histogram for computation purposes) @@ -47,5 +47,5 @@ Status DegreesToRadians(float_t degrees, float_t *radians_target); /// \param[out] result: Random number in range [a,b) Status GenerateRealNumber(float_t a, float_t b, std::mt19937 *rnd, float_t *result); } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_MATH_UTILS_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/mixup_batch_op.cc b/mindspore-lite/minddata/dataset/kernels/image/mixup_batch_op.cc index dd564f6e..7f53a9e3 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/mixup_batch_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/mixup_batch_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr size_t kExpectedImageShapeSize = 4; constexpr size_t kMaxLabelShapeSize = 3; @@ -168,4 +168,4 @@ void MixUpBatchOp::Print(std::ostream &out) const { << "alpha: " << alpha_ << "\n"; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/mixup_batch_op.h b/mindspore-lite/minddata/dataset/kernels/image/mixup_batch_op.h index e00df80a..defdeb1c 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/mixup_batch_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/mixup_batch_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class MixUpBatchOp : public RandomTensorOp { public: @@ -48,5 +48,5 @@ class MixUpBatchOp : public RandomTensorOp { float alpha_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_MIXUP_BATCH_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/normalize_op.cc b/mindspore-lite/minddata/dataset/kernels/image/normalize_op.cc index 9212bd2c..964d44dd 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/normalize_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/normalize_op.cc @@ -27,7 +27,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { NormalizeOp::NormalizeOp(std::vector mean, std::vector std, bool is_hwc) : mean_(std::move(mean)), std_(std::move(std)), is_hwc_(is_hwc) {} @@ -85,4 +85,4 @@ void NormalizeOp::Print(std::ostream &out) const { out << "}" << std::endl; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/normalize_op.h b/mindspore-lite/minddata/dataset/kernels/image/normalize_op.h index 5472a27c..01669950 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/normalize_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/normalize_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class NormalizeOp : public TensorOp { public: @@ -44,5 +44,5 @@ class NormalizeOp : public TensorOp { bool is_hwc_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite 
#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_NORMALIZE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/normalize_pad_op.cc b/mindspore-lite/minddata/dataset/kernels/image/normalize_pad_op.cc index 5ef81a5c..b4ec7d37 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/normalize_pad_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/normalize_pad_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { NormalizePadOp::NormalizePadOp(std::vector mean, std::vector std, std::string dtype, bool is_hwc) : mean_(std::move(mean)), std_(std::move(std)), dtype_(std::move(dtype)), is_hwc_(is_hwc) {} @@ -45,4 +45,4 @@ void NormalizePadOp::Print(std::ostream &out) const { out << "}" << std::endl; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/normalize_pad_op.h b/mindspore-lite/minddata/dataset/kernels/image/normalize_pad_op.h index 3c4af563..19828c73 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/normalize_pad_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/normalize_pad_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class NormalizePadOp : public TensorOp { public: @@ -45,5 +45,5 @@ class NormalizePadOp : public TensorOp { bool is_hwc_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_NORMALIZE_PAD_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/pad_op.cc b/mindspore-lite/minddata/dataset/kernels/image/pad_op.cc index a8e7301a..790cae09 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/pad_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/pad_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const BorderType PadOp::kDefBorderType = BorderType::kConstant; const uint8_t PadOp::kDefFillR = 0; @@ -56,4 +56,4 @@ Status PadOp::OutputShape(const std::vector &inputs, std::vector size, std::vector offset, std::vector fill_value, BorderType padding_mode) @@ -87,4 +87,4 @@ Status PadToSizeOp::OutputShape(const std::vector &inputs, std::vec return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/pad_to_size_op.h b/mindspore-lite/minddata/dataset/kernels/image/pad_to_size_op.h index 70166ccc..df9d0bfa 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/pad_to_size_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/pad_to_size_op.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class PadToSizeOp : public TensorOp { public: @@ -44,5 +44,5 @@ class PadToSizeOp : public TensorOp { BorderType boarder_type_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_PAD_TO_SIZE_OP_H_ diff --git 
a/mindspore-lite/minddata/dataset/kernels/image/perspective_op.cc b/mindspore-lite/minddata/dataset/kernels/image/perspective_op.cc index 84d3b485..e85578f7 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/perspective_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/perspective_op.cc @@ -17,7 +17,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { PerspectiveOp::PerspectiveOp(const std::vector> &start_points, const std::vector> &end_points, InterpolationMode interpolation) @@ -28,4 +28,4 @@ Status PerspectiveOp::Compute(const std::shared_ptr &input, std::shared_ return Perspective(input, output, start_points_, end_points_, interpolation_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/perspective_op.h b/mindspore-lite/minddata/dataset/kernels/image/perspective_op.h index 104b25e5..e5180222 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/perspective_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/perspective_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class PerspectiveOp : public TensorOp { public: @@ -45,5 +45,5 @@ class PerspectiveOp : public TensorOp { InterpolationMode interpolation_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_PERSPECTIVE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/posterize_op.cc b/mindspore-lite/minddata/dataset/kernels/image/posterize_op.cc index f9c6587a..a076a9d8 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/posterize_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/posterize_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { PosterizeOp::PosterizeOp(uint8_t bit) : bit_(bit) {} @@ -27,4 +27,4 @@ Status PosterizeOp::Compute(const std::shared_ptr &input, std::shared_pt return Posterize(input, output, bit_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/posterize_op.h b/mindspore-lite/minddata/dataset/kernels/image/posterize_op.h index a62a1549..0f5d5635 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/posterize_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/posterize_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class PosterizeOp : public TensorOp { public: @@ -42,5 +42,5 @@ class PosterizeOp : public TensorOp { uint8_t bit_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_POSTERIZE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/rand_augment_op.cc b/mindspore-lite/minddata/dataset/kernels/image/rand_augment_op.cc index a1736dbe..88bfdf6e 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/rand_augment_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/rand_augment_op.cc @@ -18,7 +18,7 @@ #include 
"mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { RandAugmentOp::RandAugmentOp(int32_t num_ops, int32_t magnitude, int32_t num_magnitude_bins, InterpolationMode interpolation, std::vector fill_value) @@ -90,4 +90,4 @@ int32_t RandAugmentOp::RandInt(int32_t low, int32_t high) { return dis(random_generator_) % (high - low) + low; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/rand_augment_op.h b/mindspore-lite/minddata/dataset/kernels/image/rand_augment_op.h index 73c53c7c..dea3ff93 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/rand_augment_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/rand_augment_op.h @@ -37,7 +37,7 @@ typedef std::map, bool>> Space; -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandAugmentOp : public RandomTensorOp { public: @@ -62,5 +62,5 @@ class RandAugmentOp : public RandomTensorOp { std::vector fill_value_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RAND_AUGMENT_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_adjust_sharpness_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_adjust_sharpness_op.cc index b021c1a5..3e9fa222 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_adjust_sharpness_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_adjust_sharpness_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomAdjustSharpnessOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -39,4 +39,4 @@ Status RandomAdjustSharpnessOp::Compute(const std::shared_ptr &input, st return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_adjust_sharpness_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_adjust_sharpness_op.h index 1c531e4e..dea8c6cf 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_adjust_sharpness_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_adjust_sharpness_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomAdjustSharpnessOp : public RandomTensorOp { public: @@ -47,5 +47,5 @@ class RandomAdjustSharpnessOp : public RandomTensorOp { std::bernoulli_distribution distribution_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_ADJUST_SHARPNESS_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_affine_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_affine_op.cc index fbcd7e1c..a22dd1a9 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_affine_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_affine_op.cc @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/math_utils.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { 
+namespace mindspore::lite { namespace dataset { const std::vector RandomAffineOp::kDegreesRange = {0.0, 0.0}; const std::vector RandomAffineOp::kTranslationPercentages = {0.0, 0.0, 0.0, 0.0}; @@ -88,4 +88,4 @@ Status RandomAffineOp::Compute(const std::shared_ptr &input, std::shared return Affine(input, output, degrees, translation, scale, shear, interpolation_, fill_value_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_affine_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_affine_op.h index b2e70ef5..478e26bd 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_affine_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_affine_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/affine_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomAffineOp : public RandomTensorOp { public: @@ -58,5 +58,5 @@ class RandomAffineOp : public RandomTensorOp { std::vector fill_value_; // fill_value }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_AFFINE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_auto_contrast_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_auto_contrast_op.cc index 6e390920..e9fc54ff 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_auto_contrast_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_auto_contrast_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomAutoContrastOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -45,4 +45,4 @@ Status RandomAutoContrastOp::Compute(const std::shared_ptr &input, std:: return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_auto_contrast_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_auto_contrast_op.h index 20aae6c6..3bf092a5 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_auto_contrast_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_auto_contrast_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomAutoContrastOp : public RandomTensorOp { public: @@ -51,5 +51,5 @@ class RandomAutoContrastOp : public RandomTensorOp { std::bernoulli_distribution distribution_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_AUTO_CONTRAST_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_color_adjust_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_color_adjust_op.cc index 32af5d40..a30fcba8 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_color_adjust_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_color_adjust_op.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace 
mindspore::lite { namespace dataset { RandomColorAdjustOp::RandomColorAdjustOp(float s_bright_factor, float e_bright_factor, float s_contrast_factor, float e_contrast_factor, float s_saturation_factor, float e_saturation_factor, @@ -90,4 +90,4 @@ Status RandomColorAdjustOp::Compute(const std::shared_ptr &input, std::s return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_color_adjust_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_color_adjust_op.h index a256a23b..5b957808 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_color_adjust_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_color_adjust_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomColorAdjustOp : public RandomTensorOp { public: @@ -69,5 +69,5 @@ class RandomColorAdjustOp : public RandomTensorOp { float hue_factor_end_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_COLOR_ADJUST_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_color_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_color_op.cc index ecefa1e6..1cc75bfd 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_color_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_color_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/core/cv_tensor.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { RandomColorOp::RandomColorOp(float t_lb, float t_ub) : dist_(t_lb, t_ub), t_lb_(t_lb), t_ub_(t_ub) {} @@ -64,4 +64,4 @@ Status RandomColorOp::Compute(const std::shared_ptr &input, std::shared_ return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_color_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_color_op.h index 418cbb08..ed7ba136 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_color_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_color_op.h @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \class RandomColorOp random_color_op.h /// \brief Blends an image with its grayscale version with random weights @@ -63,5 +63,5 @@ class RandomColorOp : public RandomTensorOp { float t_ub_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_RANDOM_COLOR_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_op.cc index 38230e15..dc33a270 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_op.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace 
dataset { RandomCropAndResizeOp::RandomCropAndResizeOp(int32_t target_height, int32_t target_width, float scale_lb, float scale_ub, float aspect_lb, float aspect_ub, @@ -196,4 +196,4 @@ Status RandomCropAndResizeOp::GetCropBox(int h_in, int w_in, int *x, int *y, int return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_op.h index f32ba950..146ea75b 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomCropAndResizeOp : public RandomTensorOp { public: @@ -65,5 +65,5 @@ class RandomCropAndResizeOp : public RandomTensorOp { double aspect_ub_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_CROP_AND_RESIZE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_with_bbox_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_with_bbox_op.cc index 0eb46ef4..350f4c14 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_with_bbox_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_with_bbox_op.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomCropAndResizeWithBBoxOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); @@ -53,4 +53,4 @@ Status RandomCropAndResizeWithBBoxOp::Compute(const TensorRow &input, TensorRow return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_with_bbox_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_with_bbox_op.h index 1b709544..cfbb3ab3 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_with_bbox_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_with_bbox_op.h @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/random_crop_and_resize_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomCropAndResizeWithBBoxOp : public RandomCropAndResizeOp { public: @@ -42,5 +42,5 @@ class RandomCropAndResizeWithBBoxOp : public RandomCropAndResizeOp { std::string Name() const override { return kRandomCropAndResizeWithBBoxOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_CROP_AND_RESIZE_WITH_BBOX_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_crop_decode_resize_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_crop_decode_resize_op.cc index ee5b13f1..b0983d0c 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_crop_decode_resize_op.cc +++ 
b/mindspore-lite/minddata/dataset/kernels/image/random_crop_decode_resize_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/decode_op.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { RandomCropDecodeResizeOp::RandomCropDecodeResizeOp(int32_t target_height, int32_t target_width, float scale_lb, float scale_ub, float aspect_lb, float aspect_ub, @@ -62,4 +62,4 @@ Status RandomCropDecodeResizeOp::Compute(const TensorRow &input, TensorRow *outp return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_crop_decode_resize_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_crop_decode_resize_op.h index 45bcef68..9f348ede 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_crop_decode_resize_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_crop_decode_resize_op.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomCropDecodeResizeOp : public RandomCropAndResizeOp { public: @@ -48,5 +48,5 @@ class RandomCropDecodeResizeOp : public RandomCropAndResizeOp { std::string Name() const override { return kRandomCropDecodeResizeOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_CROP_DECODE_RESIZE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_crop_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_crop_op.cc index 6213bc40..6306297f 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_crop_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_crop_op.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { RandomCropOp::RandomCropOp(int32_t crop_height, int32_t crop_width, int32_t pad_top, int32_t pad_bottom, int32_t pad_left, int32_t pad_right, bool pad_if_needed, BorderType padding_mode, @@ -223,4 +223,4 @@ Status RandomCropOp::OutputShape(const std::vector &inputs, std::ve std::to_string(inputs[0].Rank())); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_crop_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_crop_op.h index 7fbe9b10..627bc323 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_crop_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_crop_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomCropOp : public RandomTensorOp { public: @@ -88,5 +88,5 @@ class RandomCropOp : public RandomTensorOp { uint8_t fill_b_ = 0; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_CROP_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_crop_with_bbox_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_crop_with_bbox_op.cc index 
aff3ee7c..2b99bc47 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_crop_with_bbox_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_crop_with_bbox_op.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomCropWithBBoxOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); @@ -71,4 +71,4 @@ Status RandomCropWithBBoxOp::Compute(const TensorRow &input, TensorRow *output) return Crop(pad_image, &(*output)[0], x, y, RandomCropOp::crop_width_, RandomCropOp::crop_height_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_crop_with_bbox_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_crop_with_bbox_op.h index 71e2ace0..a68efdd7 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_crop_with_bbox_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_crop_with_bbox_op.h @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/random_crop_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomCropWithBBoxOp : public RandomCropOp { public: @@ -44,5 +44,5 @@ class RandomCropWithBBoxOp : public RandomCropOp { std::string Name() const override { return kRandomCropWithBBoxOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_CROP_WITH_BBOX_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_equalize_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_equalize_op.cc index 85f6c61b..744cb0b1 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_equalize_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_equalize_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomEqualizeOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -41,4 +41,4 @@ Status RandomEqualizeOp::Compute(const std::shared_ptr &input, std::shar return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_equalize_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_equalize_op.h index f954b93c..1f89760e 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_equalize_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_equalize_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomEqualizeOp : public RandomTensorOp { public: @@ -47,5 +47,5 @@ class RandomEqualizeOp : public RandomTensorOp { std::bernoulli_distribution distribution_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_EQUALIZE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_op.cc 
b/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_op.cc index 8fde3486..88b9e6b9 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomHorizontalFlipOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); @@ -64,4 +64,4 @@ Status RandomHorizontalFlipOp::Compute(const TensorRow &input, TensorRow *output return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_op.h index 2d04115b..6a2e59c3 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomHorizontalFlipOp : public RandomTensorOp { public: @@ -51,5 +51,5 @@ class RandomHorizontalFlipOp : public RandomTensorOp { std::bernoulli_distribution distribution_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_HORIZONTAL_FLIP_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_with_bbox_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_with_bbox_op.cc index b08d95ba..00b092f3 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_with_bbox_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_with_bbox_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomHorizontalFlipWithBBoxOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); @@ -55,4 +55,4 @@ Status RandomHorizontalFlipWithBBoxOp::Compute(const TensorRow &input, TensorRow return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_with_bbox_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_with_bbox_op.h index 2ff91c64..ed618d61 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_with_bbox_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_horizontal_flip_with_bbox_op.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomHorizontalFlipWithBBoxOp : public RandomTensorOp { public: @@ -49,5 +49,5 @@ class RandomHorizontalFlipWithBBoxOp : public RandomTensorOp { std::bernoulli_distribution distribution_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // 
MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_HORIZONTAL_FLIP_BBOX_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_invert_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_invert_op.cc index 88f8dfbf..738abe0b 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_invert_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_invert_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomInvertOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -42,4 +42,4 @@ Status RandomInvertOp::Compute(const std::shared_ptr &input, std::shared return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_invert_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_invert_op.h index e2ccfb01..65df6b1e 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_invert_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_invert_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomInvertOp : public RandomTensorOp { public: @@ -48,5 +48,5 @@ class RandomInvertOp : public RandomTensorOp { std::bernoulli_distribution distribution_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_INVERT_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_lighting_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_lighting_op.cc index d7164465..6bbca37b 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_lighting_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_lighting_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomLightingOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -33,4 +33,4 @@ Status RandomLightingOp::Compute(const std::shared_ptr &input, std::shar return RandomLighting(input, output, rnd_r, rnd_g, rnd_b); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_lighting_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_lighting_op.h index e1f9b882..4cd6df92 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_lighting_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_lighting_op.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomLightingOp : public RandomTensorOp { public: @@ -43,5 +43,5 @@ class RandomLightingOp : public RandomTensorOp { std::normal_distribution dist_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_RANDOM_LIGHTING_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_posterize_op.cc 
b/mindspore-lite/minddata/dataset/kernels/image/random_posterize_op.cc index 1e18005f..aeccdff6 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_posterize_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_posterize_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { RandomPosterizeOp::RandomPosterizeOp(const std::vector &bit_range) : bit_range_(bit_range) {} @@ -32,4 +32,4 @@ Status RandomPosterizeOp::Compute(const std::shared_ptr &input, std::sha return Posterize(input, output, bits); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_posterize_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_posterize_op.h index 0e58ebdf..c4b63d87 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_posterize_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_posterize_op.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomPosterizeOp : public RandomTensorOp { public: @@ -42,5 +42,5 @@ class RandomPosterizeOp : public RandomTensorOp { std::vector bit_range_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_POSTERIZE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_resize_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_resize_op.cc index b9ef7e17..8e51cd26 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_resize_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_resize_op.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/core/config_manager.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomResizeOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); @@ -34,4 +34,4 @@ Status RandomResizeOp::Compute(const TensorRow &input, TensorRow *output) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_resize_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_resize_op.h index 06e57703..3f32e118 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_resize_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_resize_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomResizeOp : public RandomTensorOp { public: @@ -51,5 +51,5 @@ class RandomResizeOp : public RandomTensorOp { std::uniform_int_distribution distribution_{0, 3}; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_RESIZE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_resize_with_bbox_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_resize_with_bbox_op.cc index 5d9a80df..7de4d0c7 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_resize_with_bbox_op.cc 
+++ b/mindspore-lite/minddata/dataset/kernels/image/random_resize_with_bbox_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/resize_with_bbox_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomResizeWithBBoxOp::Compute(const TensorRow &input, TensorRow *output) { // Randomly selects from the following four interpolation methods @@ -31,4 +31,4 @@ Status RandomResizeWithBBoxOp::Compute(const TensorRow &input, TensorRow *output return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_resize_with_bbox_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_resize_with_bbox_op.h index 60d08462..064e25b1 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_resize_with_bbox_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_resize_with_bbox_op.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomResizeWithBBoxOp : public RandomTensorOp { public: @@ -49,5 +49,5 @@ class RandomResizeWithBBoxOp : public RandomTensorOp { std::uniform_int_distribution distribution_{0, 3}; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_RESIZE_WITH_BBOX_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_rotation_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_rotation_op.cc index 6c787f20..6dd42e2f 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_rotation_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_rotation_op.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // constructor RandomRotationOp::RandomRotationOp(float start_degree, float end_degree, InterpolationMode resample, bool expand, @@ -82,4 +82,4 @@ Status RandomRotationOp::OutputShape(const std::vector &inputs, std return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_rotation_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_rotation_op.h index 6f7d287c..113ad73e 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_rotation_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_rotation_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomRotationOp : public RandomTensorOp { public: @@ -69,5 +69,5 @@ class RandomRotationOp : public RandomTensorOp { std::uniform_real_distribution distribution_{-1.0, 1.0}; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_ROTATION_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_select_subpolicy_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_select_subpolicy_op.cc index baa74521..07684ef9 100644 --- 
a/mindspore-lite/minddata/dataset/kernels/image/random_select_subpolicy_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_select_subpolicy_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { RandomSelectSubpolicyOp::RandomSelectSubpolicyOp(const std::vector &policy) : policy_(policy), rand_int_(0, policy.size() - 1), rand_double_(0, 1) {} @@ -90,4 +90,4 @@ Status RandomSelectSubpolicyOp::OutputType(const std::vector &inputs, return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_select_subpolicy_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_select_subpolicy_op.h index 685119ca..b7c288e6 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_select_subpolicy_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_select_subpolicy_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { using Subpolicy = std::vector, double>>; @@ -72,5 +72,5 @@ class RandomSelectSubpolicyOp : public RandomTensorOp { std::uniform_real_distribution rand_double_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_SELECT_SUBPOLICY_OP_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_sharpness_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_sharpness_op.cc index 94eb3be9..a9b5a875 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_sharpness_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_sharpness_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// constructor RandomSharpnessOp::RandomSharpnessOp(float start_degree, float end_degree) @@ -37,4 +37,4 @@ Status RandomSharpnessOp::Compute(const std::shared_ptr &input, std::sha return AdjustSharpness(input, output, alpha); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_sharpness_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_sharpness_op.h index 23d4f207..8a2e4cd5 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_sharpness_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_sharpness_op.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomSharpnessOp : public RandomTensorOp { public: @@ -46,5 +46,5 @@ class RandomSharpnessOp : public RandomTensorOp { std::uniform_real_distribution distribution_{-1.0, 1.0}; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_SHARPNESS_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_solarize_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_solarize_op.cc index 210d79a8..f22ff585 100644 --- 
a/mindspore-lite/minddata/dataset/kernels/image/random_solarize_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_solarize_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomSolarizeOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -43,4 +43,4 @@ Status RandomSolarizeOp::Compute(const std::shared_ptr &input, std::shar return Solarize(input, output, {threshold_min, threshold_max}); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_solarize_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_solarize_op.h index 2a649466..d031f8b7 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_solarize_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_solarize_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomSolarizeOp : public RandomTensorOp { public: @@ -43,5 +43,5 @@ class RandomSolarizeOp : public RandomTensorOp { std::vector threshold_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_SOLARIZE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_op.cc index e2775243..a9a9cdcf 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomVerticalFlipOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); @@ -41,4 +41,4 @@ Status RandomVerticalFlipOp::Compute(const TensorRow &input, TensorRow *output) return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_op.h index e6197d7c..e98433eb 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomVerticalFlipOp : public RandomTensorOp { public: @@ -45,5 +45,5 @@ class RandomVerticalFlipOp : public RandomTensorOp { std::bernoulli_distribution distribution_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_VERTICAL_FLIP_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_with_bbox_op.cc b/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_with_bbox_op.cc index 00379bd2..2e4031ca 100644 --- 
a/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_with_bbox_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_with_bbox_op.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/bounding_box.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RandomVerticalFlipWithBBoxOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); @@ -53,4 +53,4 @@ Status RandomVerticalFlipWithBBoxOp::Compute(const TensorRow &input, TensorRow * return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_with_bbox_op.h b/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_with_bbox_op.h index a77f600e..cedb1c63 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_with_bbox_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/random_vertical_flip_with_bbox_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RandomVerticalFlipWithBBoxOp : public RandomTensorOp { public: @@ -43,5 +43,5 @@ class RandomVerticalFlipWithBBoxOp : public RandomTensorOp { std::bernoulli_distribution distribution_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RANDOM_VERTICAL_FLIP_WITH_BBOX_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/rescale_op.cc b/mindspore-lite/minddata/dataset/kernels/image/rescale_op.cc index 9f3080a0..9bbb1860 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/rescale_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/rescale_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RescaleOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -32,4 +32,4 @@ Status RescaleOp::OutputType(const std::vector &inputs, std::vector #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { // using 8 bits for result constexpr uint8_t PrecisionBits = 22; @@ -300,4 +300,4 @@ bool ResizeCubic(const LiteMat &input, const LiteMat &dst, int dst_w, int dst_h) return res; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/resize_cubic_op.h b/mindspore-lite/minddata/dataset/kernels/image/resize_cubic_op.h index cb31c148..6eb99d66 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/resize_cubic_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/resize_cubic_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Calculate the coefficient for interpolation firstly int calc_coeff(int input_size, int out_size, int input0, int input1, const struct interpolation *interp, @@ -52,5 +52,5 @@ bool ImageInterpolation(LiteMat input, LiteMat &output, int x_size, int y_size, /// \param[in] 
dst_h expected Output image height bool ResizeCubic(const LiteMat &input, const LiteMat &dst, int dst_w, int dst_h); } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RESIZE_CUBIC_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/resize_op.cc b/mindspore-lite/minddata/dataset/kernels/image/resize_op.cc index d53f5044..4debaf12 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/resize_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/resize_op.cc @@ -25,7 +25,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const int32_t ResizeOp::kDefWidth = 0; const InterpolationMode ResizeOp::kDefInterpolation = InterpolationMode::kLinear; @@ -121,4 +121,4 @@ TensorShape ResizeOp::ComputeOutputShape(const TensorShape &input, int32_t outpu return out; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/resize_op.h b/mindspore-lite/minddata/dataset/kernels/image/resize_op.h index db81abec..918d5f59 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/resize_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/resize_op.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ResizeOp : public TensorOp { public: @@ -64,5 +64,5 @@ class ResizeOp : public TensorOp { InterpolationMode interpolation_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RESIZE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/resize_preserve_ar_op.cc b/mindspore-lite/minddata/dataset/kernels/image/resize_preserve_ar_op.cc index 681d94e4..1f99cbb2 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/resize_preserve_ar_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/resize_preserve_ar_op.cc @@ -20,7 +20,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const int32_t ResizePreserveAROp::kDefImgOrientation = 0; @@ -35,4 +35,4 @@ Status ResizePreserveAROp::Compute(const TensorRow &inputs, TensorRow *outputs) return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/resize_preserve_ar_op.h b/mindspore-lite/minddata/dataset/kernels/image/resize_preserve_ar_op.h index d8956e90..eda59653 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/resize_preserve_ar_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/resize_preserve_ar_op.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ResizePreserveAROp : public TensorOp { public: @@ -50,5 +50,5 @@ class ResizePreserveAROp : public TensorOp { int32_t img_orientation_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RESIZE_PRESERVE_AR_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/resize_with_bbox_op.cc 
b/mindspore-lite/minddata/dataset/kernels/image/resize_with_bbox_op.cc index a415eabc..7823087c 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/resize_with_bbox_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/resize_with_bbox_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status ResizeWithBBoxOp::Compute(const TensorRow &input, TensorRow *output) { IO_CHECK_VECTOR(input, output); @@ -49,4 +49,4 @@ Status ResizeWithBBoxOp::Compute(const TensorRow &input, TensorRow *output) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/resize_with_bbox_op.h b/mindspore-lite/minddata/dataset/kernels/image/resize_with_bbox_op.h index b2bc0f6a..f8744fb4 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/resize_with_bbox_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/resize_with_bbox_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ResizeWithBBoxOp : public ResizeOp { public: @@ -55,5 +55,5 @@ class ResizeWithBBoxOp : public ResizeOp { uint32_t NumOutput() override { return 2; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RESIZE_WITH_BBOX_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/resized_crop_op.cc b/mindspore-lite/minddata/dataset/kernels/image/resized_crop_op.cc index a1b56e4f..674a28bd 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/resized_crop_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/resized_crop_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status ResizedCropOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { // input output tensor shape check @@ -90,4 +90,4 @@ Status ResizedCropOp::OutputShape(const std::vector &inputs, std::v return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/resized_crop_op.h b/mindspore-lite/minddata/dataset/kernels/image/resized_crop_op.h index ea883db8..90c40f1e 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/resized_crop_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/resized_crop_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ResizedCropOp : public TensorOp { public: @@ -55,5 +55,5 @@ class ResizedCropOp : public TensorOp { InterpolationMode interpolation_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_RESIZED_CROP_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/rgb_to_bgr_op.cc b/mindspore-lite/minddata/dataset/kernels/image/rgb_to_bgr_op.cc index 918450b5..bc959411 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/rgb_to_bgr_op.cc +++ 
b/mindspore-lite/minddata/dataset/kernels/image/rgb_to_bgr_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RgbToBgrOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -33,4 +33,4 @@ Status RgbToBgrOp::Compute(const std::shared_ptr &input, std::shared_ptr return RgbToBgr(input, output); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/rgb_to_bgr_op.h b/mindspore-lite/minddata/dataset/kernels/image/rgb_to_bgr_op.h index 59b34de4..4d7fe493 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/rgb_to_bgr_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/rgb_to_bgr_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RgbToBgrOp : public TensorOp { public: @@ -37,5 +37,5 @@ class RgbToBgrOp : public TensorOp { std::string Name() const override { return kRgbToBgrOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_RGB_TO_BGR_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/rgb_to_gray_op.cc b/mindspore-lite/minddata/dataset/kernels/image/rgb_to_gray_op.cc index 96ff4f76..356378b7 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/rgb_to_gray_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/rgb_to_gray_op.cc @@ -21,11 +21,11 @@ #include "mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RgbToGrayOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); return RgbToGray(input, output); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/rgb_to_gray_op.h b/mindspore-lite/minddata/dataset/kernels/image/rgb_to_gray_op.h index 26174c42..f5e46e44 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/rgb_to_gray_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/rgb_to_gray_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RgbToGrayOp : public TensorOp { public: @@ -37,5 +37,5 @@ class RgbToGrayOp : public TensorOp { std::string Name() const override { return kRgbToGrayOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_RGB_TO_GRAY_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/rgba_to_bgr_op.cc b/mindspore-lite/minddata/dataset/kernels/image/rgba_to_bgr_op.cc index 611a8572..a20aa1ba 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/rgba_to_bgr_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/rgba_to_bgr_op.cc @@ -18,11 +18,11 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RgbaToBgrOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { 
IO_CHECK(input, output); return RgbaToBgr(input, output); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/rgba_to_bgr_op.h b/mindspore-lite/minddata/dataset/kernels/image/rgba_to_bgr_op.h index 3466d6d3..7deac49e 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/rgba_to_bgr_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/rgba_to_bgr_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RgbaToBgrOp : public TensorOp { public: @@ -38,5 +38,5 @@ class RgbaToBgrOp : public TensorOp { std::string Name() const override { return kRgbaToBgrOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_RGBA_TO_BGR_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/rgba_to_rgb_op.cc b/mindspore-lite/minddata/dataset/kernels/image/rgba_to_rgb_op.cc index 3bcc4959..f9cad1e3 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/rgba_to_rgb_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/rgba_to_rgb_op.cc @@ -18,11 +18,11 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status RgbaToRgbOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); return RgbaToRgb(input, output); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/rgba_to_rgb_op.h b/mindspore-lite/minddata/dataset/kernels/image/rgba_to_rgb_op.h index d0840a38..2c81f8e5 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/rgba_to_rgb_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/rgba_to_rgb_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RgbaToRgbOp : public TensorOp { public: @@ -38,5 +38,5 @@ class RgbaToRgbOp : public TensorOp { std::string Name() const override { return kRgbaToRgbOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_RGBA_TO_RGB_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/rotate_op.cc b/mindspore-lite/minddata/dataset/kernels/image/rotate_op.cc index 5f542d9f..9b3efc7f 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/rotate_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/rotate_op.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/lite_image_utils.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { const std::vector RotateOp::kDefCenter = {}; const InterpolationMode RotateOp::kDefInterpolation = InterpolationMode::kNearestNeighbour; @@ -150,4 +150,4 @@ TensorShape RotateOp::ConstructShape(const TensorShape &in_shape) const { return out; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/rotate_op.h b/mindspore-lite/minddata/dataset/kernels/image/rotate_op.h index 79b57ea6..4fe73197 100644 --- 
a/mindspore-lite/minddata/dataset/kernels/image/rotate_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/rotate_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class RotateOp : public TensorOp { public: @@ -70,5 +70,5 @@ class RotateOp : public TensorOp { uint8_t fill_b_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_ROTATE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/sharpness_op.cc b/mindspore-lite/minddata/dataset/kernels/image/sharpness_op.cc index 3be4a47b..241889a2 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/sharpness_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/sharpness_op.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/core/cv_tensor.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const float SharpnessOp::kDefAlpha = 1.0; @@ -29,4 +29,4 @@ Status SharpnessOp::Compute(const std::shared_ptr &input, std::shared_pt return AdjustSharpness(input, output, alpha_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/sharpness_op.h b/mindspore-lite/minddata/dataset/kernels/image/sharpness_op.h index 2c4facf6..d492d623 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/sharpness_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/sharpness_op.h @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SharpnessOp : public TensorOp { public: @@ -48,5 +48,5 @@ class SharpnessOp : public TensorOp { float alpha_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_SHARPNESS_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/slice_patches_op.cc b/mindspore-lite/minddata/dataset/kernels/image/slice_patches_op.cc index 0b04b61b..6785e091 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/slice_patches_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/slice_patches_op.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const int32_t SlicePatchesOp::kDefNumH = 1; const int32_t SlicePatchesOp::kDefNumW = 1; @@ -49,4 +49,4 @@ Status SlicePatchesOp::Compute(const TensorRow &input, TensorRow *output) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/slice_patches_op.h b/mindspore-lite/minddata/dataset/kernels/image/slice_patches_op.h index 738280c2..7be831b8 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/slice_patches_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/slice_patches_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SlicePatchesOp : public 
TensorOp { public: @@ -55,5 +55,5 @@ class SlicePatchesOp : public TensorOp { uint8_t fill_value_; // border width in number of pixels in right and bottom direction }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_SLICE_PATCHES_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/solarize_op.cc b/mindspore-lite/minddata/dataset/kernels/image/solarize_op.cc index 5ab2be9f..b51abf60 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/solarize_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/solarize_op.cc @@ -19,11 +19,11 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status SolarizeOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); return Solarize(input, output, threshold_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/solarize_op.h b/mindspore-lite/minddata/dataset/kernels/image/solarize_op.h index f663604d..7895158c 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/solarize_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/solarize_op.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SolarizeOp : public TensorOp { public: @@ -42,5 +42,5 @@ class SolarizeOp : public TensorOp { std::vector threshold_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_SOLARIZE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/swap_red_blue_op.cc b/mindspore-lite/minddata/dataset/kernels/image/swap_red_blue_op.cc index 9b24c74d..c226b8a5 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/swap_red_blue_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/swap_red_blue_op.cc @@ -18,11 +18,11 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status SwapRedBlueOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); return SwapRedAndBlue(input, output); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/swap_red_blue_op.h b/mindspore-lite/minddata/dataset/kernels/image/swap_red_blue_op.h index cbd6ce5f..16d7618c 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/swap_red_blue_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/swap_red_blue_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SwapRedBlueOp : public TensorOp { public: @@ -45,5 +45,5 @@ class SwapRedBlueOp : public TensorOp { std::string Name() const override { return kSwapRedBlueOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_SWAP_RED_BLUE_OP_H_ diff --git 
a/mindspore-lite/minddata/dataset/kernels/image/to_tensor_op.cc b/mindspore-lite/minddata/dataset/kernels/image/to_tensor_op.cc index 0eaebf19..a517e427 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/to_tensor_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/to_tensor_op.cc @@ -22,7 +22,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status ToTensorOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -35,4 +35,4 @@ Status ToTensorOp::Compute(const std::shared_ptr &input, std::shared_ptr return ToTensor(input, output, output_type_); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/to_tensor_op.h b/mindspore-lite/minddata/dataset/kernels/image/to_tensor_op.h index 533db098..dade1e57 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/to_tensor_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/to_tensor_op.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class ToTensorOp : public TensorOp { public: @@ -41,5 +41,5 @@ class ToTensorOp : public TensorOp { DataType output_type_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_TO_TENSOR_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/trivial_augment_wide_op.cc b/mindspore-lite/minddata/dataset/kernels/image/trivial_augment_wide_op.cc index 3896a708..dd69bc47 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/trivial_augment_wide_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/trivial_augment_wide_op.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { TrivialAugmentWideOp::TrivialAugmentWideOp(int32_t num_magnitude_bins, InterpolationMode interpolation, const std::vector &fill_value) @@ -87,4 +87,4 @@ int32_t TrivialAugmentWideOp::RandInt(int32_t low, int32_t high) { return dis(random_generator_) % (high - low) + low; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/trivial_augment_wide_op.h b/mindspore-lite/minddata/dataset/kernels/image/trivial_augment_wide_op.h index cef44188..612a9810 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/trivial_augment_wide_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/trivial_augment_wide_op.h @@ -35,7 +35,7 @@ typedef std::map, bool>> Space; -namespace mindspore { +namespace mindspore::lite { namespace dataset { constexpr char kTrivialAugmentWideOp[] = "TrivialAugmentWideOp"; @@ -60,5 +60,5 @@ class TrivialAugmentWideOp : public RandomTensorOp { std::vector fill_value_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_TRIVIAL_AUGMENT_WIDE_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/uniform_aug_op.cc b/mindspore-lite/minddata/dataset/kernels/image/uniform_aug_op.cc index caca1596..8feccca7 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/uniform_aug_op.cc +++ 
b/mindspore-lite/minddata/dataset/kernels/image/uniform_aug_op.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/util/random.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { UniformAugOp::UniformAugOp(std::vector> op_list, int32_t num_ops) : tensor_op_list_(std::move(op_list)), num_ops_(num_ops) {} @@ -59,4 +59,4 @@ Status UniformAugOp::Compute(const TensorRow &input, TensorRow *output) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/uniform_aug_op.h b/mindspore-lite/minddata/dataset/kernels/image/uniform_aug_op.h index 932137e7..bcdf3afa 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/uniform_aug_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/uniform_aug_op.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class UniformAugOp : public RandomTensorOp { public: @@ -50,5 +50,5 @@ class UniformAugOp : public RandomTensorOp { int32_t num_ops_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_UNIFORM_AUG_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/vertical_flip_op.cc b/mindspore-lite/minddata/dataset/kernels/image/vertical_flip_op.cc index 496bd41d..ac461c8c 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/vertical_flip_op.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/vertical_flip_op.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/data/data_utils.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status VerticalFlipOp::Compute(const std::shared_ptr &input, std::shared_ptr *output) { IO_CHECK(input, output); @@ -53,4 +53,4 @@ Status VerticalFlipOp::Compute(const std::shared_ptr &input, std::shared return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/vertical_flip_op.h b/mindspore-lite/minddata/dataset/kernels/image/vertical_flip_op.h index d508caa0..4bab8868 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/vertical_flip_op.h +++ b/mindspore-lite/minddata/dataset/kernels/image/vertical_flip_op.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/kernels/tensor_op.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class VerticalFlipOp : public TensorOp { public: @@ -36,5 +36,5 @@ class VerticalFlipOp : public TensorOp { std::string Name() const override { return kVerticalFlipOp; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_VERTICAL_FLIP_OP_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/image/video_utils.cc b/mindspore-lite/minddata/dataset/kernels/image/video_utils.cc index f2c1e74e..5e114651 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/video_utils.cc +++ b/mindspore-lite/minddata/dataset/kernels/image/video_utils.cc @@ -42,7 +42,7 @@ extern "C" { const int32_t MAX_AVIO_BUFFER_SIZE = 1073741824; -namespace mindspore { +namespace mindspore::lite { namespace dataset { struct MediaContainer { int 
channels = 1; @@ -1460,4 +1460,4 @@ Status ReadVideoTimestamps(const std::string &filename, std::vector *pt return AVReadVisualPts(&avinfo, pts_int64_vector, video_fps, time_base); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/image/video_utils.h b/mindspore-lite/minddata/dataset/kernels/image/video_utils.h index 4750c08f..3108e333 100644 --- a/mindspore-lite/minddata/dataset/kernels/image/video_utils.h +++ b/mindspore-lite/minddata/dataset/kernels/image/video_utils.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor_row.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Decode the raw input video bytes. Supported video formats are AVI, H264, H265, MOV, MP4 and WMV. /// \param input: CVTensor containing the not decoded video 1D bytes. @@ -56,5 +56,5 @@ Status ReadVideo(const std::string &filename, std::shared_ptr *video_out Status ReadVideoTimestamps(const std::string &filename, std::vector *pts_int64_vector, float *video_fps, float *time_base, const std::string &pts_unit); } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IMAGE_VIDEO_UTILS_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/data/transforms_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/data/transforms_ir.cc index c6c0d6fc..93762cbe 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/data/transforms_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/data/transforms_ir.cc @@ -57,7 +57,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Transform operations for data. namespace transforms { @@ -530,4 +530,4 @@ Status PluginOperation::from_json(nlohmann::json op_params, std::shared_ptr { @@ -57,5 +57,5 @@ class TensorOperation : public std::enable_shared_from_this { bool random_op_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_TENSOR_OPERATION_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/transforms_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/transforms_ir.cc index c6c0d6fc..93762cbe 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/transforms_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/transforms_ir.cc @@ -57,7 +57,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Transform operations for data. 
namespace transforms { @@ -530,4 +530,4 @@ Status PluginOperation::from_json(nlohmann::json op_params, std::shared_ptr &translation, float scale, @@ -155,4 +155,4 @@ MapTargetDevice AffineOperation::Type() { } } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/affine_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/affine_ir.h index fbc3f42a..d4369b9b 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/affine_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/affine_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kAffineOperation[] = "Affine"; @@ -63,5 +63,5 @@ class AffineOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_AFFINE_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/ascend_vision_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/ascend_vision_ir.cc index 072be52d..e75d0550 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/ascend_vision_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/ascend_vision_ir.cc @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/util/path.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Transform operations for computer vision namespace vision { @@ -489,4 +489,4 @@ Status DvppResizeJpegOperation::from_json(nlohmann::json op_params, std::shared_ } } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/ascend_vision_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/ascend_vision_ir.h index 55628936..73b608b5 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/ascend_vision_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/ascend_vision_ir.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Transform operations for computer vision namespace vision { @@ -197,5 +197,5 @@ class DvppResizeJpegOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_ASCEND_VISION_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_augment_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_augment_ir.cc index c2f5f50e..3f0716c8 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_augment_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_augment_ir.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -78,4 +78,4 @@ Status AutoAugmentOperation::from_json(nlohmann::json 
op_params, std::shared_ptr #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_augment_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_augment_ir.h index 0b17ca52..e2bb2cf4 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_augment_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_augment_ir.h @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" #include "mindspore-lite/minddata/dataset/kernels/image/auto_augment_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kAutoAugmentOperation[] = "AutoAugment"; @@ -59,5 +59,5 @@ class AutoAugmentOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_AUTO_AUGMENT_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_contrast_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_contrast_ir.cc index 1b3eade4..07d702ac 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_contrast_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_contrast_ir.cc @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -114,4 +114,4 @@ MapTargetDevice AutoContrastOperation::Type() { #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_contrast_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_contrast_ir.h index 0301ba42..9d15be9c 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_contrast_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/auto_contrast_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kAutoContrastOperation[] = "AutoContrast"; @@ -57,5 +57,5 @@ class AutoContrastOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_AUTO_CONTRAST_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/bounding_box_augment_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/bounding_box_augment_ir.cc index 44f96a57..ea5d297f 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/bounding_box_augment_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/bounding_box_augment_ir.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -74,4 +74,4 @@ Status BoundingBoxAugmentOperation::from_json(nlohmann::json op_params, std::sha #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace 
mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/bounding_box_augment_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/bounding_box_augment_ir.h index dc01f7a1..7ab288aa 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/bounding_box_augment_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/bounding_box_augment_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kBoundingBoxAugmentOperation[] = "BoundingBoxAugment"; @@ -54,5 +54,5 @@ class BoundingBoxAugmentOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_BOUNDING_BOX_AUGMENT_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/center_crop_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/center_crop_ir.cc index 187f03ac..4391335a 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/center_crop_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/center_crop_ir.cc @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { CenterCropOperation::CenterCropOperation(const std::vector &size) : size_(size) {} @@ -61,4 +61,4 @@ Status CenterCropOperation::from_json(nlohmann::json op_params, std::shared_ptr< } } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/center_crop_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/center_crop_ir.h index 72c97c67..69ec589a 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/center_crop_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/center_crop_ir.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/vision.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kCenterCropOperation[] = "CenterCrop"; @@ -55,5 +55,5 @@ class CenterCropOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_CENTER_CROP_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/convert_color_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/convert_color_ir.cc index 7d6cbed4..bb17d8c9 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/convert_color_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/convert_color_ir.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -97,4 +97,4 @@ MapTargetDevice ConvertColorOperation::Type() { #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/minddata/dataset/kernels/ir/vision/convert_color_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/convert_color_ir.h index 3761ad4f..fc0b5a72 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/convert_color_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/convert_color_ir.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kConvertColorOperation[] = "ConvertColor"; @@ -57,5 +57,5 @@ class ConvertColorOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_CONVERT_COLOR_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/crop_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/crop_ir.cc index 6668f789..a5defc69 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/crop_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/crop_ir.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { CropOperation::CropOperation(const std::vector &coordinates, const std::vector &size, @@ -112,4 +112,4 @@ MapTargetDevice CropOperation::Type() { } } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/crop_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/crop_ir.h index bd394bfe..848af0a0 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/crop_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/crop_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kCropOperation[] = "Crop"; @@ -58,5 +58,5 @@ class CropOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_CROP_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/cutmix_batch_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/cutmix_batch_ir.cc index 4e140eb7..3f0263fd 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/cutmix_batch_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/cutmix_batch_ir.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -72,4 +72,4 @@ Status CutMixBatchOperation::from_json(nlohmann::json op_params, std::shared_ptr #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/cutmix_batch_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/cutmix_batch_ir.h index fceb7be4..8e21e343 100644 --- 
a/mindspore-lite/minddata/dataset/kernels/ir/vision/cutmix_batch_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/cutmix_batch_ir.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kCutMixBatchOperation[] = "CutMixBatch"; @@ -54,5 +54,5 @@ class CutMixBatchOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_CUTMIX_BATCH_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/cutout_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/cutout_ir.cc index e74de073..e02f9321 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/cutout_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/cutout_ir.cc @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -73,4 +73,4 @@ Status CutOutOperation::from_json(nlohmann::json op_params, std::shared_ptr> &start_points, @@ -125,4 +125,4 @@ MapTargetDevice PerspectiveOperation::Type() { } } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/perspective_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/perspective_ir.h index c32bff05..417907af 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/perspective_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/perspective_ir.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kPerspectiveOperation[] = "Perspective"; @@ -61,5 +61,5 @@ class PerspectiveOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_PERSPECTIVE_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/posterize_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/posterize_ir.cc index d008bf51..13edcc15 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/posterize_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/posterize_ir.cc @@ -1,99 +1,99 @@ -/** - * Copyright 2022-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "mindspore-lite/minddata/dataset/kernels/ir/vision/posterize_ir.h" - -#ifndef ENABLE_ANDROID -#include "mindspore-lite/minddata/dataset/kernels/image/posterize_op.h" -#endif -#if !defined(BUILD_LITE) && defined(ENABLE_D) -#include "mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend910b/dvpp_posterize_op.h" -#endif -#include "mindspore-lite/minddata/dataset/util/validators.h" - -namespace mindspore { -namespace dataset { -namespace vision { -#ifndef ENABLE_ANDROID -// PosterizeOperation -PosterizeOperation::PosterizeOperation(uint8_t bits, const std::string &device_target) - : bits_(bits), device_target_(device_target) {} - -PosterizeOperation::~PosterizeOperation() = default; - -Status PosterizeOperation::ValidateParams() { - constexpr uint8_t kMinimumBitValue = 0; - constexpr uint8_t kMaximumBitValue = 8; - - if (bits_ < kMinimumBitValue || bits_ > kMaximumBitValue) { - std::string err_msg = "Posterize: bits is out of range [0, 8], got: " + std::to_string(bits_); - LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg); - } - - // device target - if (device_target_ != "CPU" && device_target_ != "Ascend") { - std::string err_msg = "Posterize: Invalid device target. It's not CPU or Ascend."; - LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg); - } - - return Status::OK(); -} - -std::shared_ptr PosterizeOperation::Build() { - if (device_target_ == "CPU") { - std::shared_ptr tensor_op = std::make_shared(bits_); - return tensor_op; -#if !defined(BUILD_LITE) && defined(ENABLE_D) - } else if (device_target_ == "Ascend") { - std::shared_ptr dvpp_tensor_op = std::make_shared(bits_); - return dvpp_tensor_op; -#endif - } else { - MS_LOG(ERROR) << "Posterize: Invalid device target. It's not CPU or Ascend."; - return nullptr; - } -} - -Status PosterizeOperation::to_json(nlohmann::json *out_json) { - RETURN_UNEXPECTED_IF_NULL(out_json); - (*out_json)["bits"] = bits_; - (*out_json)["device_target"] = device_target_; - return Status::OK(); -} - -Status PosterizeOperation::from_json(nlohmann::json op_params, std::shared_ptr *operation) { - RETURN_UNEXPECTED_IF_NULL(operation); - RETURN_IF_NOT_OK(ValidateParamInJson(op_params, "bits", kPosterizeOperation)); - RETURN_IF_NOT_OK(ValidateParamInJson(op_params, "device_target", kPosterizeOperation)); - uint8_t bits_ = op_params["bits"]; - std::string device_target = op_params["device_target"]; - *operation = std::make_shared(bits_, device_target); - return Status::OK(); -} - -MapTargetDevice PosterizeOperation::Type() { - if (device_target_ == "CPU") { - return MapTargetDevice::kCpu; - } else if (device_target_ == "Ascend") { - return MapTargetDevice::kAscend910B; - } else { - MS_LOG(ERROR) << "Posterize: Invalid device target. It's not CPU or Ascend."; - } - return MapTargetDevice::kInvalid; -} -#endif -} // namespace vision -} // namespace dataset -} // namespace mindspore +/** + * Copyright 2022-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "mindspore-lite/minddata/dataset/kernels/ir/vision/posterize_ir.h" + +#ifndef ENABLE_ANDROID +#include "mindspore-lite/minddata/dataset/kernels/image/posterize_op.h" +#endif +#if !defined(BUILD_LITE) && defined(ENABLE_D) +#include "mindspore-lite/minddata/dataset/kernels/image/dvpp/ascend910b/dvpp_posterize_op.h" +#endif +#include "mindspore-lite/minddata/dataset/util/validators.h" + +namespace mindspore::lite { +namespace dataset { +namespace vision { +#ifndef ENABLE_ANDROID +// PosterizeOperation +PosterizeOperation::PosterizeOperation(uint8_t bits, const std::string &device_target) + : bits_(bits), device_target_(device_target) {} + +PosterizeOperation::~PosterizeOperation() = default; + +Status PosterizeOperation::ValidateParams() { + constexpr uint8_t kMinimumBitValue = 0; + constexpr uint8_t kMaximumBitValue = 8; + + if (bits_ < kMinimumBitValue || bits_ > kMaximumBitValue) { + std::string err_msg = "Posterize: bits is out of range [0, 8], got: " + std::to_string(bits_); + LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg); + } + + // device target + if (device_target_ != "CPU" && device_target_ != "Ascend") { + std::string err_msg = "Posterize: Invalid device target. It's not CPU or Ascend."; + LOG_AND_RETURN_STATUS_SYNTAX_ERROR(err_msg); + } + + return Status::OK(); +} + +std::shared_ptr PosterizeOperation::Build() { + if (device_target_ == "CPU") { + std::shared_ptr tensor_op = std::make_shared(bits_); + return tensor_op; +#if !defined(BUILD_LITE) && defined(ENABLE_D) + } else if (device_target_ == "Ascend") { + std::shared_ptr dvpp_tensor_op = std::make_shared(bits_); + return dvpp_tensor_op; +#endif + } else { + MS_LOG(ERROR) << "Posterize: Invalid device target. It's not CPU or Ascend."; + return nullptr; + } +} + +Status PosterizeOperation::to_json(nlohmann::json *out_json) { + RETURN_UNEXPECTED_IF_NULL(out_json); + (*out_json)["bits"] = bits_; + (*out_json)["device_target"] = device_target_; + return Status::OK(); +} + +Status PosterizeOperation::from_json(nlohmann::json op_params, std::shared_ptr *operation) { + RETURN_UNEXPECTED_IF_NULL(operation); + RETURN_IF_NOT_OK(ValidateParamInJson(op_params, "bits", kPosterizeOperation)); + RETURN_IF_NOT_OK(ValidateParamInJson(op_params, "device_target", kPosterizeOperation)); + uint8_t bits_ = op_params["bits"]; + std::string device_target = op_params["device_target"]; + *operation = std::make_shared(bits_, device_target); + return Status::OK(); +} + +MapTargetDevice PosterizeOperation::Type() { + if (device_target_ == "CPU") { + return MapTargetDevice::kCpu; + } else if (device_target_ == "Ascend") { + return MapTargetDevice::kAscend910B; + } else { + MS_LOG(ERROR) << "Posterize: Invalid device target. It's not CPU or Ascend."; + } + return MapTargetDevice::kInvalid; +} +#endif +} // namespace vision +} // namespace dataset +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/posterize_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/posterize_ir.h index 728bafb1..0b6f2a74 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/posterize_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/posterize_ir.h @@ -1,59 +1,59 @@ -/** - * Copyright 2022-2023 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_POSTERIZE_IR_H_ -#define MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_POSTERIZE_IR_H_ - -#include -#include -#include - -#include "include/api/status.h" -#include "mindspore-lite/minddata/dataset/include/dataset/constants.h" -#include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" -#include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" - -namespace mindspore { -namespace dataset { -namespace vision { -constexpr char kPosterizeOperation[] = "Posterize"; - -class PosterizeOperation : public TensorOperation { - public: - explicit PosterizeOperation(uint8_t bits, const std::string &device_target = "CPU"); - - ~PosterizeOperation() override; - - std::shared_ptr Build() override; - - Status ValidateParams() override; - - std::string Name() const override { return kPosterizeOperation; }; - - Status to_json(nlohmann::json *out_json) override; - - static Status from_json(nlohmann::json op_params, std::shared_ptr *operation); - - MapTargetDevice Type() override; - - private: - uint8_t bits_; - std::string device_target_; -}; -} // namespace vision -} // namespace dataset -} // namespace mindspore -#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_POSTERIZE_IR_H_ +/** + * Copyright 2022-2023 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_POSTERIZE_IR_H_ +#define MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_POSTERIZE_IR_H_ + +#include +#include +#include + +#include "include/api/status.h" +#include "mindspore-lite/minddata/dataset/include/dataset/constants.h" +#include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" +#include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" + +namespace mindspore::lite { +namespace dataset { +namespace vision { +constexpr char kPosterizeOperation[] = "Posterize"; + +class PosterizeOperation : public TensorOperation { + public: + explicit PosterizeOperation(uint8_t bits, const std::string &device_target = "CPU"); + + ~PosterizeOperation() override; + + std::shared_ptr Build() override; + + Status ValidateParams() override; + + std::string Name() const override { return kPosterizeOperation; }; + + Status to_json(nlohmann::json *out_json) override; + + static Status from_json(nlohmann::json op_params, std::shared_ptr *operation); + + MapTargetDevice Type() override; + + private: + uint8_t bits_; + std::string device_target_; +}; +} // namespace vision +} // namespace dataset +} // namespace mindspore::lite +#endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_POSTERIZE_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/rand_augment_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/rand_augment_ir.cc index 8a5f170e..a26d6c00 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/rand_augment_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/rand_augment_ir.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -94,4 +94,4 @@ Status RandAugmentOperation::from_json(nlohmann::json op_params, std::shared_ptr #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/rand_augment_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/rand_augment_ir.h index 116755b7..04808af6 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/rand_augment_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/rand_augment_ir.h @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" #include "mindspore-lite/minddata/dataset/kernels/image/rand_augment_op.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandAugmentOperation[] = "RandAugment"; @@ -61,5 +61,5 @@ class RandAugmentOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RAND_AUGMENT_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_adjust_sharpness_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_adjust_sharpness_ir.cc index 7aab0c3a..cca04bcf 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_adjust_sharpness_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_adjust_sharpness_ir.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace 
mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -66,4 +66,4 @@ Status RandomAdjustSharpnessOperation::from_json(nlohmann::json op_params, #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_adjust_sharpness_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_adjust_sharpness_ir.h index 8813d1b6..3a0ea873 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_adjust_sharpness_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_adjust_sharpness_ir.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomAdjustSharpnessOperation[] = "RandomAdjustSharpness"; @@ -55,5 +55,5 @@ class RandomAdjustSharpnessOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_ADJUST_SHARPNESS_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_affine_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_affine_ir.cc index 5ea27d74..52e1a488 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_affine_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_affine_ir.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr size_t dimension_zero = 0; @@ -176,4 +176,4 @@ Status RandomAffineOperation::from_json(nlohmann::json op_params, std::shared_pt } } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_affine_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_affine_ir.h index 08f398b4..db424fda 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_affine_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_affine_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomAffineOperation[] = "RandomAffine"; @@ -60,5 +60,5 @@ class RandomAffineOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_AFFINE_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_auto_contrast_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_auto_contrast_ir.cc index 470db715..d5d79185 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_auto_contrast_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_auto_contrast_ir.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { 
+namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -74,4 +74,4 @@ Status RandomAutoContrastOperation::from_json(nlohmann::json op_params, std::sha #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_auto_contrast_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_auto_contrast_ir.h index 636c8d84..d4e11a1b 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_auto_contrast_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_auto_contrast_ir.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomAutoContrastOperation[] = "RandomAutoContrast"; @@ -56,5 +56,5 @@ class RandomAutoContrastOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_AUTO_CONTRAST_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_adjust_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_adjust_ir.cc index 5e04af69..ef8d7517 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_adjust_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_adjust_ir.cc @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr size_t dimension_zero = 0; @@ -120,4 +120,4 @@ Status RandomColorAdjustOperation::from_json(nlohmann::json op_params, std::shar #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_adjust_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_adjust_ir.h index 25ef6231..f0d9017d 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_adjust_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_adjust_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomColorAdjustOperation[] = "RandomColorAdjust"; @@ -57,5 +57,5 @@ class RandomColorAdjustOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_COLOR_ADJUST_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_ir.cc index fd580aab..3a3a36c9 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_ir.cc @@ -23,7 +23,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { 
namespace vision { #ifndef ENABLE_ANDROID @@ -74,4 +74,4 @@ Status RandomColorOperation::from_json(nlohmann::json op_params, std::shared_ptr #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_ir.h index 80ddabe3..12b7a030 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_color_ir.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomColorOperation[] = "RandomColor"; @@ -53,5 +53,5 @@ class RandomColorOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_COLOR_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_decode_resize_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_decode_resize_ir.cc index b5f048ea..756f6483 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_decode_resize_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_decode_resize_ir.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -98,4 +98,4 @@ Status RandomCropDecodeResizeOperation::from_json(nlohmann::json op_params, #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_decode_resize_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_decode_resize_ir.h index 7c79b46d..2f16b78d 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_decode_resize_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_decode_resize_ir.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" #include "mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_ir.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomCropDecodeResizeOperation[] = "RandomCropDecodeResize"; @@ -53,5 +53,5 @@ class RandomCropDecodeResizeOperation : public RandomResizedCropOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_CROP_DECODE_RESIZE_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_ir.cc index bf0ebea3..7eca13bb 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_ir.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { 
namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -144,4 +144,4 @@ Status RandomCropOperation::from_json(nlohmann::json op_params, std::shared_ptr< #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_ir.h index f4edfe4e..288be095 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomCropOperation[] = "RandomCrop"; @@ -58,5 +58,5 @@ class RandomCropOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_CROP_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_with_bbox_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_with_bbox_ir.cc index bb849eb5..89ddd434 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_with_bbox_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_with_bbox_ir.cc @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -146,4 +146,4 @@ Status RandomCropWithBBoxOperation::from_json(nlohmann::json op_params, std::sha #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_with_bbox_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_with_bbox_ir.h index ee80b613..dad7eed1 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_with_bbox_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_crop_with_bbox_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomCropWithBBoxOperation[] = "RandomCropWithBBox"; @@ -58,5 +58,5 @@ class RandomCropWithBBoxOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_CROP_WITH_BBOX_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_equalize_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_equalize_ir.cc index 791c972c..6abe4da9 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_equalize_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_equalize_ir.cc @@ -22,7 +22,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { 
#ifndef ENABLE_ANDROID @@ -59,4 +59,4 @@ Status RandomEqualizeOperation::from_json(nlohmann::json op_params, std::shared_ #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_equalize_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_equalize_ir.h index edf31282..a2297984 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_equalize_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_equalize_ir.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomEqualizeOperation[] = "RandomEqualize"; @@ -54,5 +54,5 @@ class RandomEqualizeOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_EQUALIZE_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_ir.cc index 70694d29..331a0b7c 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_ir.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -58,4 +58,4 @@ Status RandomHorizontalFlipOperation::from_json(nlohmann::json op_params, std::s #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_ir.h index 2a528d31..e2a9e5ae 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_ir.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomHorizontalFlipOperation[] = "RandomHorizontalFlip"; @@ -52,5 +52,5 @@ class RandomHorizontalFlipOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_HORIZONTAL_FLIP_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_with_bbox_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_with_bbox_ir.cc index f6ca83e9..0daf41d5 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_with_bbox_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_with_bbox_ir.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace 
mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -61,4 +61,4 @@ Status RandomHorizontalFlipWithBBoxOperation::from_json(nlohmann::json op_params #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_with_bbox_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_with_bbox_ir.h index 5cb86247..e7fc8513 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_with_bbox_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_horizontal_flip_with_bbox_ir.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomHorizontalFlipWithBBoxOperation[] = "RandomHorizontalFlipWithBBox"; @@ -52,5 +52,5 @@ class RandomHorizontalFlipWithBBoxOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_HORIZONTAL_FLIP_WITH_BBOX_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_invert_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_invert_ir.cc index 2546dae6..c4de8dda 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_invert_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_invert_ir.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -58,4 +58,4 @@ Status RandomInvertOperation::from_json(nlohmann::json op_params, std::shared_pt #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_invert_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_invert_ir.h index 841cf1b3..27efdfac 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_invert_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_invert_ir.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomInvertOperation[] = "RandomInvert"; @@ -54,5 +54,5 @@ class RandomInvertOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_INVERT_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_lighting_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_lighting_ir.cc index 022853ce..e7eeeb21 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_lighting_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_lighting_ir.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include 
"mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -60,4 +60,4 @@ Status RandomLightingOperation::from_json(nlohmann::json op_params, std::shared_ #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_lighting_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_lighting_ir.h index da7cee06..9f23433f 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_lighting_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_lighting_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomLightingOperation[] = "RandomLighting"; @@ -53,5 +53,5 @@ class RandomLightingOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_LIGHTING_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_posterize_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_posterize_ir.cc index cd2b359b..56dd1fb9 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_posterize_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_posterize_ir.cc @@ -23,7 +23,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -87,4 +87,4 @@ Status RandomPosterizeOperation::from_json(nlohmann::json op_params, std::shared #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_posterize_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_posterize_ir.h index ad24fb80..02c0e46e 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_posterize_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_posterize_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomPosterizeOperation[] = "RandomPosterize"; @@ -53,5 +53,5 @@ class RandomPosterizeOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_POSTERIZE_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_ir.cc index 95f7f259..4549084d 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_ir.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { 
namespace vision { #ifndef ENABLE_ANDROID @@ -74,4 +74,4 @@ Status RandomResizeOperation::from_json(nlohmann::json op_params, std::shared_pt #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_ir.h index be21f9b9..b943767e 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomResizeOperation[] = "RandomResize"; @@ -53,5 +53,5 @@ class RandomResizeOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_RESIZE_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_with_bbox_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_with_bbox_ir.cc index 67cc1df5..7cfc200a 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_with_bbox_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_with_bbox_ir.cc @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -76,4 +76,4 @@ Status RandomResizeWithBBoxOperation::from_json(nlohmann::json op_params, std::s #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_with_bbox_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_with_bbox_ir.h index cdce4e0c..ca100b39 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_with_bbox_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resize_with_bbox_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomResizeWithBBoxOperation[] = "RandomResizeWithBBox"; @@ -53,5 +53,5 @@ class RandomResizeWithBBoxOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_RESIZE_WITH_BBOX_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_ir.cc index 475c7baa..98f45bb9 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_ir.cc @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { 
namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -114,4 +114,4 @@ Status RandomResizedCropOperation::from_json(nlohmann::json op_params, std::shar #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_ir.h index 388f730f..5ea8e207 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomResizedCropOperation[] = "RandomResizedCrop"; @@ -64,5 +64,5 @@ class RandomResizedCropOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_RESIZED_CROP_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_with_bbox_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_with_bbox_ir.cc index ff3301d8..ca7bfe7e 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_with_bbox_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_with_bbox_ir.cc @@ -24,7 +24,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -112,4 +112,4 @@ Status RandomResizedCropWithBBoxOperation::from_json(nlohmann::json op_params, #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_with_bbox_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_with_bbox_ir.h index a7613605..6e402f7d 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_with_bbox_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_with_bbox_ir.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" #include "mindspore-lite/minddata/dataset/kernels/ir/vision/random_resized_crop_ir.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomResizedCropWithBBoxOperation[] = "RandomResizedCropWithBBox"; @@ -60,5 +60,5 @@ class RandomResizedCropWithBBoxOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_RESIZED_CROP_WITH_BBOX_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_rotation_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_rotation_ir.cc index 9054a7d7..7c9686b5 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_rotation_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_rotation_ir.cc @@ -24,7 +24,7 @@ #include 
"mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -142,4 +142,4 @@ Status RandomRotationOperation::from_json(nlohmann::json op_params, std::shared_ #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_rotation_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_rotation_ir.h index f3821f0d..204b1a53 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_rotation_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_rotation_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomRotationOperation[] = "RandomRotation"; @@ -58,5 +58,5 @@ class RandomRotationOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_ROTATION_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_select_subpolicy_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_select_subpolicy_ir.cc index 0569d46b..e933d6c4 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_select_subpolicy_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_select_subpolicy_ir.cc @@ -24,7 +24,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -129,4 +129,4 @@ Status RandomSelectSubpolicyOperation::from_json(nlohmann::json op_params, #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_select_subpolicy_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_select_subpolicy_ir.h index 6fd37c1e..3b974ae9 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_select_subpolicy_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_select_subpolicy_ir.h @@ -28,7 +28,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomSelectSubpolicyOperation[] = "RandomSelectSubpolicy"; @@ -55,5 +55,5 @@ class RandomSelectSubpolicyOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_SELECT_SUBPOLICY_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_sharpness_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_sharpness_ir.cc index 724b8141..0ec2addc 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_sharpness_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_sharpness_ir.cc @@ -23,7 +23,7 @@ #endif #include 
"mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -76,4 +76,4 @@ Status RandomSharpnessOperation::from_json(nlohmann::json op_params, std::shared #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_sharpness_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_sharpness_ir.h index f5e8d6d8..5f0d2e81 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_sharpness_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_sharpness_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomSharpnessOperation[] = "RandomSharpness"; @@ -53,5 +53,5 @@ class RandomSharpnessOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_SHARPNESS_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_solarize_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_solarize_ir.cc index b0541258..5a50fafc 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_solarize_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_solarize_ir.cc @@ -23,7 +23,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -81,4 +81,4 @@ Status RandomSolarizeOperation::from_json(nlohmann::json op_params, std::shared_ #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_solarize_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_solarize_ir.h index faa8a854..83d3ccfb 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_solarize_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_solarize_ir.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomSolarizeOperation[] = "RandomSolarize"; @@ -53,5 +53,5 @@ class RandomSolarizeOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_SOLARIZE_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_ir.cc index 5ac0377b..65fdaf58 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_ir.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { 
namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -58,4 +58,4 @@ Status RandomVerticalFlipOperation::from_json(nlohmann::json op_params, std::sha #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_ir.h index a900b701..cc23951c 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_ir.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomVerticalFlipOperation[] = "RandomVerticalFlip"; @@ -52,5 +52,5 @@ class RandomVerticalFlipOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_VERTICAL_FLIP_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_with_bbox_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_with_bbox_ir.cc index c7e8715d..d938ad66 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_with_bbox_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_with_bbox_ir.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/kernels/ir/validators.h" #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #ifndef ENABLE_ANDROID @@ -61,4 +61,4 @@ Status RandomVerticalFlipWithBBoxOperation::from_json(nlohmann::json op_params, #endif } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_with_bbox_ir.h b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_with_bbox_ir.h index ab6756e8..d1d73b03 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_with_bbox_ir.h +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/random_vertical_flip_with_bbox_ir.h @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/include/dataset/transforms.h" #include "mindspore-lite/minddata/dataset/kernels/ir/tensor_operation.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { constexpr char kRandomVerticalFlipWithBBoxOperation[] = "RandomVerticalFlipWithBBox"; @@ -52,5 +52,5 @@ class RandomVerticalFlipWithBBoxOperation : public TensorOperation { }; } // namespace vision } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_IR_VISION_RANDOM_VERTICAL_FLIP_WITH_BBOX_IR_H_ diff --git a/mindspore-lite/minddata/dataset/kernels/ir/vision/rescale_ir.cc b/mindspore-lite/minddata/dataset/kernels/ir/vision/rescale_ir.cc index 515c5859..fabb1ce8 100644 --- a/mindspore-lite/minddata/dataset/kernels/ir/vision/rescale_ir.cc +++ b/mindspore-lite/minddata/dataset/kernels/ir/vision/rescale_ir.cc @@ -20,7 +20,7 @@ #endif #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace 
mindspore { +namespace mindspore::lite { namespace dataset { namespace vision { #if !defined(ENABLE_ANDROID) @@ -65,4 +65,4 @@ Status RescaleOperation::from_json(nlohmann::json op_params, std::shared_ptr #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Name: Compute() // Description: This Compute() take 1 Tensor and produce 1 Tensor. @@ -80,4 +80,4 @@ RandomTensorOp::RandomTensorOp() { void RandomTensorOp::SetSeed(uint32_t seed) { random_generator_.seed(seed); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/kernels/tensor_op.h b/mindspore-lite/minddata/dataset/kernels/tensor_op.h index c4981fd8..7d3673d7 100644 --- a/mindspore-lite/minddata/dataset/kernels/tensor_op.h +++ b/mindspore-lite/minddata/dataset/kernels/tensor_op.h @@ -49,7 +49,7 @@ } \ } while (false) -namespace mindspore { +namespace mindspore::lite { namespace dataset { // base class constexpr char kTensorOp[] = "TensorOp"; @@ -363,5 +363,5 @@ class RandomTensorOp : public TensorOp { std::mt19937 random_generator_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_KERNELS_TENSOR_OP_H_ diff --git a/mindspore-lite/minddata/dataset/liteapi/include/datasets.h b/mindspore-lite/minddata/dataset/liteapi/include/datasets.h index b8b201a4..f692b380 100644 --- a/mindspore-lite/minddata/dataset/liteapi/include/datasets.h +++ b/mindspore-lite/minddata/dataset/liteapi/include/datasets.h @@ -36,7 +36,7 @@ #include "include/dataset/samplers.h" #include "include/dataset/transforms.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class Tensor; class TensorShape; @@ -708,5 +708,5 @@ inline std::shared_ptr DATASET_API Mnist(const std::string &datase return std::make_shared(StringToChar(dataset_dir), StringToChar(usage), sampler, cache); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_DATASETS_H_ diff --git a/mindspore-lite/minddata/dataset/util/allocator.h b/mindspore-lite/minddata/dataset/util/allocator.h index acd92f20..760abae2 100644 --- a/mindspore-lite/minddata/dataset/util/allocator.h +++ b/mindspore-lite/minddata/dataset/util/allocator.h @@ -23,7 +23,7 @@ #include #include "mindspore-lite/minddata/dataset/util/memory_pool.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // The following conforms to the requirements of // std::allocator. 
Do not rename/change any needed @@ -199,6 +199,6 @@ class MemGuard { std::unique_ptr> ptr_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_ALLOCATOR_H_ diff --git a/mindspore-lite/minddata/dataset/util/arena.cc b/mindspore-lite/minddata/dataset/util/arena.cc index 56d119a0..056b7e41 100644 --- a/mindspore-lite/minddata/dataset/util/arena.cc +++ b/mindspore-lite/minddata/dataset/util/arena.cc @@ -22,7 +22,7 @@ #include "mindspore/ccsrc/runtime/hardware/device_context_manager.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { struct MemHdr { uint32_t sig; @@ -290,4 +290,4 @@ Status Arena::CreateArena(std::shared_ptr *p_ba, size_t val_in_MB, bool i return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/arena.h b/mindspore-lite/minddata/dataset/util/arena.h index d8c060bc..a418f940 100644 --- a/mindspore-lite/minddata/dataset/util/arena.h +++ b/mindspore-lite/minddata/dataset/util/arena.h @@ -26,7 +26,7 @@ #define ARENA_LOG_BLK_SZ (6u) #define ARENA_BLK_SZ (static_cast(1u << ARENA_LOG_BLK_SZ)) #define ARENA_WALL_OVERHEAD_SZ 32 -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// This is a memory arena based on a treap data structure. /// The constructor of the Arena takes the size of the initial memory size (in MB). @@ -151,6 +151,6 @@ class Arena : public MemoryPool { Status Init(); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_ARENA_H_ diff --git a/mindspore-lite/minddata/dataset/util/auto_index.h b/mindspore-lite/minddata/dataset/util/auto_index.h index 3a31e150..b1d87c77 100644 --- a/mindspore-lite/minddata/dataset/util/auto_index.h +++ b/mindspore-lite/minddata/dataset/util/auto_index.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/btree.h" #include "mindspore-lite/minddata/dataset/util/system_pool.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// This is a B+ tree with generated int64_t value as key. /// Use minKey() function to query the min key. 
@@ -96,5 +96,5 @@ class AutoIndexObj : public BPlusTree, T> { std::atomic inx_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_AUTO_INDEX_H_ diff --git a/mindspore-lite/minddata/dataset/util/bit.h b/mindspore-lite/minddata/dataset/util/bit.h index e4872a36..cd49127d 100644 --- a/mindspore-lite/minddata/dataset/util/bit.h +++ b/mindspore-lite/minddata/dataset/util/bit.h @@ -16,7 +16,7 @@ #ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_BIT_H_ #define MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_BIT_H_ -namespace mindspore { +namespace mindspore::lite { namespace dataset { template Enum operator|(Enum lhs, Enum rhs) { @@ -70,6 +70,6 @@ Enum operator~(Enum v) { return static_cast(~static_cast(v)); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_BIT_H_ diff --git a/mindspore-lite/minddata/dataset/util/btree.h b/mindspore-lite/minddata/dataset/util/btree.h index 35170da9..3d9f460f 100644 --- a/mindspore-lite/minddata/dataset/util/btree.h +++ b/mindspore-lite/minddata/dataset/util/btree.h @@ -30,7 +30,7 @@ #include "mindspore-lite/minddata/dataset/util/services.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Default traits for a B+ tree struct BPlusTreeTraits { @@ -522,7 +522,7 @@ class BPlusTree { value_type operator[](key_type key); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_INDEX_H_ #include "mindspore-lite/minddata/dataset/util/btree_impl.tpp" diff --git a/mindspore-lite/minddata/dataset/util/buddy.cc b/mindspore-lite/minddata/dataset/util/buddy.cc index a993a414..c1b10456 100644 --- a/mindspore-lite/minddata/dataset/util/buddy.cc +++ b/mindspore-lite/minddata/dataset/util/buddy.cc @@ -29,7 +29,7 @@ inline uint64_t BitEx(uint64_t rhs, uint64_t lhs) { return rhs ^ lhs; } inline uint64_t BitAnd(uint64_t rhs, uint64_t lhs) { return rhs & lhs; } -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status BuddySpace::Init() { const uint64_t kBitOffset = 3; @@ -412,4 +412,4 @@ Status BuddySpace::CreateBuddySpace(std::unique_ptr *out_bs, int log return rc; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/buddy.h b/mindspore-lite/minddata/dataset/util/buddy.h index 9f4ecea4..aae667f9 100644 --- a/mindspore-lite/minddata/dataset/util/buddy.h +++ b/mindspore-lite/minddata/dataset/util/buddy.h @@ -32,7 +32,7 @@ using log_t = int; #define TWO_BIT 0x20 #define MORE_BIT 0x10 #define NOSPACE ((addr_t)(-1)) -namespace mindspore { +namespace mindspore::lite { namespace dataset { struct BSpaceDescriptor { int32_t sig; @@ -122,6 +122,6 @@ class BuddySpace { void FreeBuddySeg(rel_addr_t addr, size_t blk_size, size_t req_size); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_BUDDY_H_ diff --git a/mindspore-lite/minddata/dataset/util/circular_pool.cc b/mindspore-lite/minddata/dataset/util/circular_pool.cc index 0a193bc4..d994080f 100644 --- a/mindspore-lite/minddata/dataset/util/circular_pool.cc +++ b/mindspore-lite/minddata/dataset/util/circular_pool.cc @@ -21,7 +21,7 @@ #include "include/securec.h" #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace 
mindspore::lite { namespace dataset { Status CircularPool::AddOneArena() { Status rc; @@ -224,4 +224,4 @@ Status CircularPool::CreateCircularPool(std::shared_ptr *out_pool, i CircularPool::~CircularPool() = default; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/circular_pool.h b/mindspore-lite/minddata/dataset/util/circular_pool.h index be34b636..2da62dcc 100644 --- a/mindspore-lite/minddata/dataset/util/circular_pool.h +++ b/mindspore-lite/minddata/dataset/util/circular_pool.h @@ -23,7 +23,7 @@ #include "mindspore-lite/minddata/dataset/util/arena.h" #include "mindspore-lite/minddata/dataset/util/lock.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { using ListOfArenas = std::vector>; @@ -104,6 +104,6 @@ class CircularPool : public MemoryPool { Status AddOneArena(); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_CIRCULAR_POOL_H_ diff --git a/mindspore-lite/minddata/dataset/util/cond_var.cc b/mindspore-lite/minddata/dataset/util/cond_var.cc index 77e0aa7c..8508de8d 100644 --- a/mindspore-lite/minddata/dataset/util/cond_var.cc +++ b/mindspore-lite/minddata/dataset/util/cond_var.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/util/services.h" #include "mindspore-lite/minddata/dataset/util/task_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { CondVar::CondVar() : svc_(nullptr), my_name_(Services::GetUniqueID()) {} @@ -121,4 +121,4 @@ Status CondVar::Deregister() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/cond_var.h b/mindspore-lite/minddata/dataset/util/cond_var.h index 6b83a88f..4c61cc97 100644 --- a/mindspore-lite/minddata/dataset/util/cond_var.h +++ b/mindspore-lite/minddata/dataset/util/cond_var.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/intrp_resource.h" #include "mindspore-lite/minddata/dataset/util/intrp_service.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class CondVar : public IntrpResource { public: @@ -62,5 +62,5 @@ class CondVar : public IntrpResource { std::mutex interrupt_mux_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_COND_VAR_H_ diff --git a/mindspore-lite/minddata/dataset/util/ftok_key.cc b/mindspore-lite/minddata/dataset/util/ftok_key.cc index 07be2469..7186b37f 100644 --- a/mindspore-lite/minddata/dataset/util/ftok_key.cc +++ b/mindspore-lite/minddata/dataset/util/ftok_key.cc @@ -46,7 +46,7 @@ #include "mindspore-lite/minddata/dataset/util/task_manager.h" #include "mindspore-lite/minddata/dataset/kernels/image/image_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { #if !defined(__APPLE__) && !defined(BUILD_LITE) && !defined(_WIN32) && !defined(_WIN64) && !defined(__ANDROID__) && \ !defined(ANDROID) @@ -80,4 +80,4 @@ Status GetKey(key_t *key) { } #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/ftok_key.h b/mindspore-lite/minddata/dataset/util/ftok_key.h index 615d268b..2c7c6545 100644 --- a/mindspore-lite/minddata/dataset/util/ftok_key.h +++ b/mindspore-lite/minddata/dataset/util/ftok_key.h @@ -45,7 +45,7 @@ namespace platform = mindspore; namespace platform = 
mindspore::lite; #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { #if !defined(__APPLE__) && !defined(BUILD_LITE) && !defined(_WIN32) && !defined(_WIN64) && !defined(__ANDROID__) && \ !defined(ANDROID) @@ -53,6 +53,6 @@ extern std::atomic inc_id; Status GetKey(key_t *key); #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_FTOK_KEY_H_ diff --git a/mindspore-lite/minddata/dataset/util/gil_scoped.h b/mindspore-lite/minddata/dataset/util/gil_scoped.h index 15def6cf..7b7fb708 100644 --- a/mindspore-lite/minddata/dataset/util/gil_scoped.h +++ b/mindspore-lite/minddata/dataset/util/gil_scoped.h @@ -23,7 +23,7 @@ namespace py = pybind11; -namespace mindspore { +namespace mindspore::lite { namespace dataset { class GilAcquireWithCheck { @@ -56,5 +56,5 @@ class GilAcquireWithCheck { std::unique_ptr acquire_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_GIL_SCOPED_H_ diff --git a/mindspore-lite/minddata/dataset/util/intrp_resource.h b/mindspore-lite/minddata/dataset/util/intrp_resource.h index d5f13cca..47c3de75 100644 --- a/mindspore-lite/minddata/dataset/util/intrp_resource.h +++ b/mindspore-lite/minddata/dataset/util/intrp_resource.h @@ -19,7 +19,7 @@ #include #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class IntrpResource { public: @@ -48,5 +48,5 @@ class IntrpResource { std::atomic st_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_INTRP_RESOURCE_H_ diff --git a/mindspore-lite/minddata/dataset/util/intrp_service.cc b/mindspore-lite/minddata/dataset/util/intrp_service.cc index 32c72b3d..5ad76bb1 100644 --- a/mindspore-lite/minddata/dataset/util/intrp_service.cc +++ b/mindspore-lite/minddata/dataset/util/intrp_service.cc @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/util/services.h" #include "mindspore-lite/minddata/dataset/util/task_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const int64_t kServiceRetryGetUniqueIdInterVal = 10; IntrpService::IntrpService() try : high_water_mark_(0) { (void)ServiceStart(); } catch (const std::exception &e) { @@ -96,4 +96,4 @@ void IntrpService::InterruptAll() noexcept { } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/intrp_service.h b/mindspore-lite/minddata/dataset/util/intrp_service.h index d47e7ccf..19b83777 100644 --- a/mindspore-lite/minddata/dataset/util/intrp_service.h +++ b/mindspore-lite/minddata/dataset/util/intrp_service.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/util/service.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { using SvcAllocator = Allocator>; @@ -57,5 +57,5 @@ class IntrpService : public Service { std::map all_intrp_resources_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_INTRP_SERVICE_H_ diff --git a/mindspore-lite/minddata/dataset/util/json_helper.cc b/mindspore-lite/minddata/dataset/util/json_helper.cc index 63d58748..21e0e6d8 100644 --- a/mindspore-lite/minddata/dataset/util/json_helper.cc +++ 
b/mindspore-lite/minddata/dataset/util/json_helper.cc @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/util/path.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Create a numbered json file from image folder Status JsonHelper::CreateAlbum(const std::string &in_dir, const std::string &out_dir) { @@ -158,4 +158,4 @@ size_t JsonHelper::DumpData(const unsigned char *tensor_addr, const size_t &tens return tensor_size; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/json_helper.h b/mindspore-lite/minddata/dataset/util/json_helper.h index 6ba72bbb..04304496 100644 --- a/mindspore-lite/minddata/dataset/util/json_helper.h +++ b/mindspore-lite/minddata/dataset/util/json_helper.h @@ -37,7 +37,7 @@ namespace platform = mindspore; namespace platform = mindspore::lite; #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief Simple class to do data manipulation, contains helper function to update json files in dataset @@ -254,6 +254,6 @@ class JsonHelper { } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_JSON_HELPER_H_ diff --git a/mindspore-lite/minddata/dataset/util/list.h b/mindspore-lite/minddata/dataset/util/list.h index 776827aa..29582385 100644 --- a/mindspore-lite/minddata/dataset/util/list.h +++ b/mindspore-lite/minddata/dataset/util/list.h @@ -21,7 +21,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { template struct Node { @@ -211,6 +211,6 @@ struct List { } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_LIST_H_ diff --git a/mindspore-lite/minddata/dataset/util/lock.cc b/mindspore-lite/minddata/dataset/util/lock.cc index 49083438..19df8142 100644 --- a/mindspore-lite/minddata/dataset/util/lock.cc +++ b/mindspore-lite/minddata/dataset/util/lock.cc @@ -15,7 +15,7 @@ */ #include "mindspore-lite/minddata/dataset/util/lock.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { void SpinLock::Lock() { while (true) { @@ -167,4 +167,4 @@ void LockGuard::Lock() { own_lock_ = true; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/lock.h b/mindspore-lite/minddata/dataset/util/lock.h index 35f15ef9..fa47f4da 100644 --- a/mindspore-lite/minddata/dataset/util/lock.h +++ b/mindspore-lite/minddata/dataset/util/lock.h @@ -20,7 +20,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { class SpinLock { public: @@ -168,6 +168,6 @@ class LockGuard { bool own_lock_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_LOCK_H_ diff --git a/mindspore-lite/minddata/dataset/util/md_log_adapter.cc b/mindspore-lite/minddata/dataset/util/md_log_adapter.cc index 0a1dd83a..85c746de 100644 --- a/mindspore-lite/minddata/dataset/util/md_log_adapter.cc +++ b/mindspore-lite/minddata/dataset/util/md_log_adapter.cc @@ -18,7 +18,7 @@ #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status MDLogAdapter::Apply(Status *rc) { std::string status_msg = ConstructMsg(rc->StatusCode(), rc->CodeAsString(rc->StatusCode()), 
"", rc->GetLineOfCode(), @@ -81,4 +81,4 @@ std::string MDLogAdapter::ConstructMsg(const enum StatusCode &status_code, const return ss.str(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/md_log_adapter.h b/mindspore-lite/minddata/dataset/util/md_log_adapter.h index d93ecd78..b2dd7791 100644 --- a/mindspore-lite/minddata/dataset/util/md_log_adapter.h +++ b/mindspore-lite/minddata/dataset/util/md_log_adapter.h @@ -22,7 +22,7 @@ #include "include/api/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class MDLogAdapter { public: @@ -37,5 +37,5 @@ class MDLogAdapter { const std::string &err_description); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_MD_LOG_ADAPTER_H diff --git a/mindspore-lite/minddata/dataset/util/memory_pool.cc b/mindspore-lite/minddata/dataset/util/memory_pool.cc index 7f9b0dca..8d14387d 100644 --- a/mindspore-lite/minddata/dataset/util/memory_pool.cc +++ b/mindspore-lite/minddata/dataset/util/memory_pool.cc @@ -16,7 +16,7 @@ #include "mindspore-lite/minddata/dataset/util/memory_pool.h" #include "include/securec.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status DeMalloc(std::size_t s, void **p, bool init_to_zero = false) { if (p == nullptr) { @@ -34,7 +34,7 @@ Status DeMalloc(std::size_t s, void **p, bool init_to_zero = false) { } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite void *operator new(std::size_t s, mindspore::Status *rc, std::shared_ptr b) { void *ptr = nullptr; diff --git a/mindspore-lite/minddata/dataset/util/memory_pool.h b/mindspore-lite/minddata/dataset/util/memory_pool.h index 0da6a45d..e4d1038d 100644 --- a/mindspore-lite/minddata/dataset/util/memory_pool.h +++ b/mindspore-lite/minddata/dataset/util/memory_pool.h @@ -21,7 +21,7 @@ #include #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Abstract class of a memory pool class MemoryPool { @@ -46,7 +46,7 @@ class MemoryPool { Status DeMalloc(std::size_t s, void **p, bool); } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite void *operator new(std::size_t, mindspore::Status *, std::shared_ptr); diff --git a/mindspore-lite/minddata/dataset/util/monitor.cc b/mindspore-lite/minddata/dataset/util/monitor.cc index 35edd98d..cbc903b8 100644 --- a/mindspore-lite/minddata/dataset/util/monitor.cc +++ b/mindspore-lite/minddata/dataset/util/monitor.cc @@ -17,7 +17,7 @@ #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { #if !defined(__APPLE__) && !defined(BUILD_LITE) && !defined(_WIN32) && !defined(_WIN64) && !defined(__ANDROID__) && \ !defined(ANDROID) @@ -58,4 +58,4 @@ Status MonitorSubprocess(int pid) { } #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/monitor.h b/mindspore-lite/minddata/dataset/util/monitor.h index 2cbeee82..3b7eda05 100644 --- a/mindspore-lite/minddata/dataset/util/monitor.h +++ b/mindspore-lite/minddata/dataset/util/monitor.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { #if !defined(__APPLE__) && !defined(BUILD_LITE) && !defined(_WIN32) && !defined(_WIN64) && !defined(__ANDROID__) && \ !defined(ANDROID) @@ 
-33,6 +33,6 @@ namespace dataset { Status MonitorSubprocess(int pid); #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_MONITOR_H_ diff --git a/mindspore-lite/minddata/dataset/util/path.cc b/mindspore-lite/minddata/dataset/util/path.cc index 7ebc7265..24daa9d4 100644 --- a/mindspore-lite/minddata/dataset/util/path.cc +++ b/mindspore-lite/minddata/dataset/util/path.cc @@ -35,7 +35,7 @@ #include "utils/ms_utils.h" #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { #if defined(_WIN32) || defined(_WIN64) char Path::separator_ = '\\'; @@ -410,4 +410,4 @@ std::ostream &operator<<(std::ostream &os, const Path &s) { return os; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/path.h b/mindspore-lite/minddata/dataset/util/path.h index ae0c690d..06e0aacd 100644 --- a/mindspore-lite/minddata/dataset/util/path.h +++ b/mindspore-lite/minddata/dataset/util/path.h @@ -21,7 +21,7 @@ #include "utils/os.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class Path { public: @@ -126,6 +126,6 @@ class Path { std::string path_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_PATH_H_ diff --git a/mindspore-lite/minddata/dataset/util/queue.h b/mindspore-lite/minddata/dataset/util/queue.h index 20dd0c25..ea9d28c9 100644 --- a/mindspore-lite/minddata/dataset/util/queue.h +++ b/mindspore-lite/minddata/dataset/util/queue.h @@ -29,7 +29,7 @@ #include "mindspore-lite/minddata/dataset/util/cond_var.h" #include "mindspore-lite/minddata/dataset/util/task_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // A simple thread safe queue using a fixed size array template @@ -315,5 +315,5 @@ class QueueList { mutable std::mutex mux_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_QUEUE_H_ diff --git a/mindspore-lite/minddata/dataset/util/queue_map.h b/mindspore-lite/minddata/dataset/util/queue_map.h index 52bd3795..c4eaf1d4 100644 --- a/mindspore-lite/minddata/dataset/util/queue_map.h +++ b/mindspore-lite/minddata/dataset/util/queue_map.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/util/system_pool.h" #include "mindspore-lite/minddata/dataset/util/semaphore.h" #include "mindspore-lite/minddata/dataset/util/services.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { template /// \brief QueueMap is like a Queue but instead of there is a map of deque. 
@@ -160,6 +160,6 @@ class QueueMap { std::atomic num_rows_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_QUEUE_MAP_H_ diff --git a/mindspore-lite/minddata/dataset/util/random.h b/mindspore-lite/minddata/dataset/util/random.h index 54604fa1..93ac16a0 100644 --- a/mindspore-lite/minddata/dataset/util/random.h +++ b/mindspore-lite/minddata/dataset/util/random.h @@ -33,7 +33,7 @@ #include "mindspore-lite/minddata/dataset/core/global_context.h" #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { inline std::mt19937 GetRandomDevice() { #if defined(_WIN32) || defined(_WIN64) @@ -73,6 +73,6 @@ inline uint32_t GetSeed() { } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_RANDOM_H_ diff --git a/mindspore-lite/minddata/dataset/util/rdr.cc b/mindspore-lite/minddata/dataset/util/rdr.cc index aa375076..42d9f57c 100644 --- a/mindspore-lite/minddata/dataset/util/rdr.cc +++ b/mindspore-lite/minddata/dataset/util/rdr.cc @@ -19,7 +19,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const int32_t kMdRdrRecordLimit = 10; @@ -167,4 +167,4 @@ Status MDChannelInfo::RecordPushEndTime() { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/rdr.h b/mindspore-lite/minddata/dataset/util/rdr.h index c8f3861e..8f4e4e4a 100644 --- a/mindspore-lite/minddata/dataset/util/rdr.h +++ b/mindspore-lite/minddata/dataset/util/rdr.h @@ -21,7 +21,7 @@ #include #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class MDChannelInfo { public: @@ -59,6 +59,6 @@ class MDChannelInfo { std::deque push_end_time_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_RDR_H_ diff --git a/mindspore-lite/minddata/dataset/util/semaphore.cc b/mindspore-lite/minddata/dataset/util/semaphore.cc index 40a5a4bf..c26a6085 100644 --- a/mindspore-lite/minddata/dataset/util/semaphore.cc +++ b/mindspore-lite/minddata/dataset/util/semaphore.cc @@ -16,7 +16,7 @@ #include "mindspore-lite/minddata/dataset/util/semaphore.h" #include "mindspore-lite/minddata/dataset/util/task_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status Semaphore::P() { std::unique_lock lck(mutex_); @@ -35,4 +35,4 @@ Status Semaphore::Deregister() { return (wait_cond_.Deregister()); } void Semaphore::ResetIntrpState() { wait_cond_.ResetIntrpState(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/semaphore.h b/mindspore-lite/minddata/dataset/util/semaphore.h index f3bf736f..7ff155e2 100644 --- a/mindspore-lite/minddata/dataset/util/semaphore.h +++ b/mindspore-lite/minddata/dataset/util/semaphore.h @@ -18,7 +18,7 @@ #include "mindspore-lite/minddata/dataset/util/cond_var.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class TaskGroup; @@ -50,5 +50,5 @@ class Semaphore { CondVar wait_cond_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_SEMAPHORE_H_ diff --git 
a/mindspore-lite/minddata/dataset/util/service.cc b/mindspore-lite/minddata/dataset/util/service.cc index a3ca1db7..fffabbb5 100644 --- a/mindspore-lite/minddata/dataset/util/service.cc +++ b/mindspore-lite/minddata/dataset/util/service.cc @@ -16,7 +16,7 @@ #include "mindspore-lite/minddata/dataset/util/service.h" #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { Status Service::ServiceStart() { do { @@ -79,4 +79,4 @@ Status Service::ServiceStop() noexcept { } while (true); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/service.h b/mindspore-lite/minddata/dataset/util/service.h index a97be754..58a55c34 100644 --- a/mindspore-lite/minddata/dataset/util/service.h +++ b/mindspore-lite/minddata/dataset/util/service.h @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/util/lock.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class Service { public: @@ -49,5 +49,5 @@ class Service { RWLock state_lock_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_SERVICE_H_ diff --git a/mindspore-lite/minddata/dataset/util/services.cc b/mindspore-lite/minddata/dataset/util/services.cc index fc4f214c..3fb6f207 100644 --- a/mindspore-lite/minddata/dataset/util/services.cc +++ b/mindspore-lite/minddata/dataset/util/services.cc @@ -30,7 +30,7 @@ #define LOGIN_NAME_MAX 256 #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { std::unique_ptr Services::instance_ = nullptr; std::once_flag Services::init_instance_flag_; @@ -119,4 +119,4 @@ Services::~Services() noexcept { } } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/services.h b/mindspore-lite/minddata/dataset/util/services.h index f209668d..431d1b38 100644 --- a/mindspore-lite/minddata/dataset/util/services.h +++ b/mindspore-lite/minddata/dataset/util/services.h @@ -30,7 +30,7 @@ #define UNIQUEID_LEN 36 #define UNIQUEID_LIST_LIMITS 1024 #define UNIQUEID_HALF_INDEX ((UNIQUEID_LIST_LIMITS) / 2) -namespace mindspore { +namespace mindspore::lite { namespace dataset { class TaskManager; @@ -114,6 +114,6 @@ class Services { Status CreateAllInstances(); }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_SERVICES_H_ diff --git a/mindspore-lite/minddata/dataset/util/shared_mem.cc b/mindspore-lite/minddata/dataset/util/shared_mem.cc index d0bcf860..47c79946 100644 --- a/mindspore-lite/minddata/dataset/util/shared_mem.cc +++ b/mindspore-lite/minddata/dataset/util/shared_mem.cc @@ -25,7 +25,7 @@ #include #include -namespace mindspore::dataset { +namespace mindspore::lite::dataset { #if !defined(_WIN32) && !defined(_WIN64) std::string GenerateShmName() { static std::atomic counter{0}; @@ -117,4 +117,4 @@ void SharedMem::Close() { MS_LOG(INFO) << "Shared memory " << name_ << " has been closed."; } #endif -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset diff --git a/mindspore-lite/minddata/dataset/util/shared_mem.h b/mindspore-lite/minddata/dataset/util/shared_mem.h index 60aca87e..7404e3d6 100644 --- a/mindspore-lite/minddata/dataset/util/shared_mem.h +++ b/mindspore-lite/minddata/dataset/util/shared_mem.h @@ -20,7 +20,7 @@ #include 
"mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore::dataset { +namespace mindspore::lite::dataset { std::string GenerateShmName(); class SharedMem { @@ -48,5 +48,5 @@ class SharedMem { size_t size_; void *buf_; }; -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_SHARED_MEM_H_ diff --git a/mindspore-lite/minddata/dataset/util/sig_handler.cc b/mindspore-lite/minddata/dataset/util/sig_handler.cc index 92a268f5..4d1351df 100644 --- a/mindspore-lite/minddata/dataset/util/sig_handler.cc +++ b/mindspore-lite/minddata/dataset/util/sig_handler.cc @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/util/task_manager.h" -namespace mindspore::dataset { +namespace mindspore::lite::dataset { static std::unordered_map> worker_groups = {}; #if !defined(_WIN32) && !defined(_WIN64) /// \brief Set handler for the specified signal. @@ -222,4 +222,4 @@ void DeregisterWorkerPIDs(int64_t id) { MS_LOG(INFO) << "Watch dog stops monitoring process(es): " << GetPIDsString(worker_groups[id]); (void)worker_groups.erase(id); } -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset diff --git a/mindspore-lite/minddata/dataset/util/sig_handler.h b/mindspore-lite/minddata/dataset/util/sig_handler.h index dd7e96c9..2df6530e 100644 --- a/mindspore-lite/minddata/dataset/util/sig_handler.h +++ b/mindspore-lite/minddata/dataset/util/sig_handler.h @@ -19,7 +19,7 @@ #include #include -namespace mindspore::dataset { +namespace mindspore::lite::dataset { /// \brief Register the custom signal handlers. extern void RegisterHandlers(); @@ -34,5 +34,5 @@ extern void RegisterWorkerPIDs(int64_t id, const std::set &pids); /// \brief Deregister workers to be monitored by the watch dog. extern void DeregisterWorkerPIDs(int64_t id); -} // namespace mindspore::dataset +} // namespace mindspore::lite::dataset #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_SIG_HANDLER_H_ diff --git a/mindspore-lite/minddata/dataset/util/slice.cc b/mindspore-lite/minddata/dataset/util/slice.cc index a93e6d8c..ef96707c 100644 --- a/mindspore-lite/minddata/dataset/util/slice.cc +++ b/mindspore-lite/minddata/dataset/util/slice.cc @@ -15,7 +15,7 @@ */ #include "mindspore-lite/minddata/dataset/util/slice.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { WritableSlice::WritableSlice(const WritableSlice &src, off64_t offset, size_t len) : ReadableSlice(src, offset, len) { mutable_data_ = static_cast(src.mutable_data_) + offset; @@ -35,4 +35,4 @@ Status WritableSlice::Copy(WritableSlice *dest, const ReadableSlice &src) { return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/slice.h b/mindspore-lite/minddata/dataset/util/slice.h index 473b26b0..3d223498 100644 --- a/mindspore-lite/minddata/dataset/util/slice.h +++ b/mindspore-lite/minddata/dataset/util/slice.h @@ -25,7 +25,7 @@ #define off64_t off_t #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { /// \brief A ReadableSlice wraps a const pointer in memory and its size. 
/// \see WritableSlice for a non-const version @@ -129,5 +129,5 @@ class WritableSlice : public ReadableSlice { void *GetMutablePointer() { return mutable_data_; } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_SLICE_H_ diff --git a/mindspore-lite/minddata/dataset/util/status.cc b/mindspore-lite/minddata/dataset/util/status.cc index 09bfce3b..3f1b89a5 100644 --- a/mindspore-lite/minddata/dataset/util/status.cc +++ b/mindspore-lite/minddata/dataset/util/status.cc @@ -26,7 +26,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { #if !defined(_WIN32) && !defined(_WIN64) && !defined(__APPLE__) float GetMemoryUsage() { @@ -87,4 +87,4 @@ float GetMemoryUsage() { } #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/status.h b/mindspore-lite/minddata/dataset/util/status.h index cb5f9aa5..8a7ff2aa 100644 --- a/mindspore-lite/minddata/dataset/util/status.h +++ b/mindspore-lite/minddata/dataset/util/status.h @@ -32,7 +32,7 @@ #include "include/api/status.h" #include "mindspore-lite/minddata/dataset/util/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { #define RETURN_IF_NOT_OK(_s) \ do { \ @@ -137,5 +137,5 @@ float GetMemoryUsage(); } while (false) #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_STATUS_H_ diff --git a/mindspore-lite/minddata/dataset/util/system_pool.h b/mindspore-lite/minddata/dataset/util/system_pool.h index e69df666..74ec4a04 100644 --- a/mindspore-lite/minddata/dataset/util/system_pool.h +++ b/mindspore-lite/minddata/dataset/util/system_pool.h @@ -25,7 +25,7 @@ #include "mindspore-lite/minddata/dataset/util/allocator.h" #include "mindspore-lite/minddata/dataset/util/memory_pool.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // This class demonstrate how to implement a simple MemoryPool // for minddata/dataset using malloc/free/realloc. 
We need to @@ -75,6 +75,6 @@ class SystemPool : public MemoryPool { } }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_SYSTEM_POOL_H_ diff --git a/mindspore-lite/minddata/dataset/util/task.cc b/mindspore-lite/minddata/dataset/util/task.cc index 9b0eb544..62f7ebf5 100644 --- a/mindspore-lite/minddata/dataset/util/task.cc +++ b/mindspore-lite/minddata/dataset/util/task.cc @@ -24,7 +24,7 @@ #include "utils/ms_context.h" #include "mindspore/ccsrc/include/runtime/hardware_abstract/data_queue/data_queue_mgr.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace dataset { thread_local Task *gMyTask = nullptr; @@ -270,4 +270,4 @@ pthread_t Task::GetNativeHandle() const { return native_handle_; } #endif } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/task.h b/mindspore-lite/minddata/dataset/util/task.h index 19b6e30c..a968df79 100644 --- a/mindspore-lite/minddata/dataset/util/task.h +++ b/mindspore-lite/minddata/dataset/util/task.h @@ -40,7 +40,7 @@ #include "mindspore-lite/minddata/dataset/util/log_adapter.h" #include "mindspore-lite/minddata/dataset/util/wait_post.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { const uint32_t kWaitInterruptTaskTime = 30; // the wait time of interrupt task @@ -149,6 +149,6 @@ class Task : public IntrpResource { extern thread_local Task *gMyTask; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_TASK_H_ diff --git a/mindspore-lite/minddata/dataset/util/task_manager.cc b/mindspore-lite/minddata/dataset/util/task_manager.cc index 081b24a4..bae282fd 100644 --- a/mindspore-lite/minddata/dataset/util/task_manager.cc +++ b/mindspore-lite/minddata/dataset/util/task_manager.cc @@ -20,7 +20,7 @@ #include "include/securec.h" #include "utils/ms_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { TaskManager *TaskManager::instance_ = nullptr; std::once_flag TaskManager::init_instance_flag_; @@ -400,4 +400,4 @@ Status TaskGroup::GetTaskErrorIfAny() { std::shared_ptr TaskGroup::GetIntrpService() { return intrp_svc_; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/task_manager.h b/mindspore-lite/minddata/dataset/util/task_manager.h index 88c04c4f..e803598a 100644 --- a/mindspore-lite/minddata/dataset/util/task_manager.h +++ b/mindspore-lite/minddata/dataset/util/task_manager.h @@ -32,7 +32,7 @@ #include "mindspore-lite/minddata/dataset/util/status.h" #include "mindspore-lite/minddata/dataset/util/task.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { namespace thread { using id = std::thread::id; @@ -194,6 +194,6 @@ inline Status GetInterruptStatus() { } while (false) } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_TASK_MANAGER_H_ diff --git a/mindspore-lite/minddata/dataset/util/treap.h b/mindspore-lite/minddata/dataset/util/treap.h index 8e8003f2..a76e18f3 100644 --- a/mindspore-lite/minddata/dataset/util/treap.h +++ b/mindspore-lite/minddata/dataset/util/treap.h @@ -22,7 +22,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { namespace dataset { // A treap is a combination of binary search tree and heap. Each key is given a priority. 
The priority // for any non-leaf node is greater than or equal to the priority of its children. @@ -403,5 +403,5 @@ class Treap { std::vector free_list_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_TREAP_H_ diff --git a/mindspore-lite/minddata/dataset/util/validators.cc b/mindspore-lite/minddata/dataset/util/validators.cc index 4a9c65fe..cda08a51 100644 --- a/mindspore-lite/minddata/dataset/util/validators.cc +++ b/mindspore-lite/minddata/dataset/util/validators.cc @@ -15,8 +15,8 @@ */ #include "mindspore-lite/minddata/dataset/util/validators.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // implement the validate function here } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/validators.h b/mindspore-lite/minddata/dataset/util/validators.h index 6ce42d05..bbeb7391 100644 --- a/mindspore-lite/minddata/dataset/util/validators.h +++ b/mindspore-lite/minddata/dataset/util/validators.h @@ -27,7 +27,7 @@ #include "mindspore-lite/minddata/dataset/core/tensor.h" #include "mindspore-lite/minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // validator Parameter in json file inline Status ValidateParamInJson(const nlohmann::json &json_obj, const std::string ¶m_name, @@ -223,5 +223,5 @@ std::string NumberSetToString(const std::set &valid_value) { return "(" + err_msg + ")"; } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_VALIDATORS_H_ diff --git a/mindspore-lite/minddata/dataset/util/wait_post.cc b/mindspore-lite/minddata/dataset/util/wait_post.cc index 82f97feb..2304aa9d 100644 --- a/mindspore-lite/minddata/dataset/util/wait_post.cc +++ b/mindspore-lite/minddata/dataset/util/wait_post.cc @@ -16,7 +16,7 @@ #include "mindspore-lite/minddata/dataset/util/wait_post.h" #include "mindspore-lite/minddata/dataset/util/task_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { WaitPost::WaitPost() : value_(0) {} @@ -42,4 +42,4 @@ void WaitPost::ResetIntrpState() { wait_cond_.ResetIntrpState(); } Status WaitPost::Deregister() { return wait_cond_.Deregister(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/dataset/util/wait_post.h b/mindspore-lite/minddata/dataset/util/wait_post.h index 38522f32..5568a08a 100644 --- a/mindspore-lite/minddata/dataset/util/wait_post.h +++ b/mindspore-lite/minddata/dataset/util/wait_post.h @@ -20,7 +20,7 @@ #include "mindspore-lite/minddata/dataset/util/cond_var.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class TaskGroup; @@ -48,6 +48,6 @@ class WaitPost { int value_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_UTIL_WAIT_POST_H_ diff --git a/mindspore-lite/minddata/wrapper/MDToDApi.cc b/mindspore-lite/minddata/wrapper/MDToDApi.cc index 62c9d2f5..17388201 100644 --- a/mindspore-lite/minddata/wrapper/MDToDApi.cc +++ b/mindspore-lite/minddata/wrapper/MDToDApi.cc @@ -53,7 +53,7 @@ using mindspore::Status; using mindspore::dataset::BorderType; using mindspore::dataset::InterpolationMode; -namespace mindspore { +namespace mindspore::lite { class MDToDApi { public: std::shared_ptr _iter; @@ -468,4 +468,4 @@ extern "C" int 
MDToDApi_UpdateNoOfFaces(MDToDApi *pMDToDApi, int32_t noOfFaces) } return 0; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/wrapper/MDToDApi.h b/mindspore-lite/minddata/wrapper/MDToDApi.h index 4dcab8f9..6f09e957 100644 --- a/mindspore-lite/minddata/wrapper/MDToDApi.h +++ b/mindspore-lite/minddata/wrapper/MDToDApi.h @@ -19,7 +19,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { class MDToDApi; typedef struct MDToDBuff { @@ -58,7 +58,7 @@ typedef struct MDToDResult { MDToDBuff_t imageQualitiesBuff; MDToDBuff_t faceEmbeddingsBuff; } MDToDResult_t; -} // namespace mindspore +} // namespace mindspore::lite using MDToDApi_pathTest_t = int (*)(const char *path); using MDToDApi_testAlbum_t = int (*)(); diff --git a/mindspore-lite/minddata/wrapper/album_op_android.cc b/mindspore-lite/minddata/wrapper/album_op_android.cc index af5c0265..49b00b0f 100644 --- a/mindspore-lite/minddata/wrapper/album_op_android.cc +++ b/mindspore-lite/minddata/wrapper/album_op_android.cc @@ -21,7 +21,7 @@ #include "minddata/dataset/kernels/image/lite_image_utils.h" #include "minddata/dataset/kernels/image/exif_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { AlbumOp::AlbumOp(const std::string &file_dir, bool do_decode, const std::string &schema_file, @@ -515,4 +515,4 @@ Status AlbumOp::LoadTensorRow(row_id_type row_id, const std::string &file, return Status::OK(); } } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/minddata/wrapper/album_op_android.h b/mindspore-lite/minddata/wrapper/album_op_android.h index 226ba66c..2b53b79f 100644 --- a/mindspore-lite/minddata/wrapper/album_op_android.h +++ b/mindspore-lite/minddata/wrapper/album_op_android.h @@ -32,7 +32,7 @@ #include "minddata/dataset/util/path.h" #include "minddata/dataset/util/status.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { // Forward declares template @@ -189,5 +189,5 @@ class AlbumOp { std::vector column_names_; }; } // namespace dataset -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_MINDDATA_WRAPPER_ALBUM_OP_ANDROID_H_ diff --git a/mindspore-lite/providers/dpico/common/log_util.cc b/mindspore-lite/providers/dpico/common/log_util.cc index aba00a4c..53171189 100644 --- a/mindspore-lite/providers/dpico/common/log_util.cc +++ b/mindspore-lite/providers/dpico/common/log_util.cc @@ -16,7 +16,7 @@ #include "common/log_util.h" #include #include -namespace mindspore { +namespace mindspore::lite { int StrToInt(const char *env) { if (env == nullptr) { return static_cast(mindspore::DpicoLogLevel::WARNING); @@ -71,4 +71,4 @@ void DpicoLogWriter::operator<(const DpicoLogStream &stream) const noexcept { msg << stream.sstream_->rdbuf(); OutputLog(msg); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/providers/dpico/common/log_util.h b/mindspore-lite/providers/dpico/common/log_util.h index be234231..391d437d 100644 --- a/mindspore-lite/providers/dpico/common/log_util.h +++ b/mindspore-lite/providers/dpico/common/log_util.h @@ -30,7 +30,7 @@ static constexpr size_t GetRealPathPos() noexcept { : 0; } -namespace mindspore { +namespace mindspore::lite { #define DPICO_FILE_NAME \ (sizeof(__FILE__) > GetRealPathPos() ? 
static_cast(__FILE__) + GetRealPathPos() \ : static_cast(__FILE__)) @@ -95,6 +95,6 @@ class DpicoLogWriter { #define MS_LOG_INFO MSLOG_IF(mindspore::DpicoLogLevel::INFO) #define MS_LOG_WARNING MSLOG_IF(mindspore::DpicoLogLevel::WARNING) #define MS_LOG_ERROR MSLOG_IF(mindspore::DpicoLogLevel::ERROR) -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_DPICO_COMMON_LOG_UTIL_H_ diff --git a/mindspore-lite/providers/dpico/common/op_attr.h b/mindspore-lite/providers/dpico/common/op_attr.h index a7e43e30..f28462e6 100644 --- a/mindspore-lite/providers/dpico/common/op_attr.h +++ b/mindspore-lite/providers/dpico/common/op_attr.h @@ -17,7 +17,7 @@ #ifndef MINDSPORE_LITE_PROVIDERS_DPICO_COMMON_OP_ATTR_H_ #define MINDSPORE_LITE_PROVIDERS_DPICO_COMMON_OP_ATTR_H_ -namespace mindspore { +namespace mindspore::lite { constexpr auto kAclConfigPath = "AclConfigPath"; constexpr auto kLastDimStride = "internal_stride"; constexpr auto kSupportZeroCopy = "SupportZeroCopy"; @@ -36,5 +36,5 @@ constexpr auto kNmsThreshold = "NmsThreshold"; constexpr auto kOutputsFormat = "outputs_format"; constexpr auto kOutputsShape = "outputs_shape"; constexpr auto kScoreThreshold = "ScoreThreshold"; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_DPICO_COMMON_OP_ATTR_H_ diff --git a/mindspore-lite/providers/dpico/infer/custom_infer.cc b/mindspore-lite/providers/dpico/infer/custom_infer.cc index 0cb655d6..2efbcd41 100644 --- a/mindspore-lite/providers/dpico/infer/custom_infer.cc +++ b/mindspore-lite/providers/dpico/infer/custom_infer.cc @@ -33,7 +33,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Custom; -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr int kOmParameterNum = 1; @@ -209,9 +209,9 @@ std::shared_ptr CustomInferCreator() { return infer; } } // namespace dpico -} // namespace mindspore -namespace mindspore { +} // namespace mindspore::lite +namespace mindspore::lite { namespace kernel { REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, DPICO, dpico::CustomInferCreator); } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/providers/dpico/infer/custom_infer.h b/mindspore-lite/providers/dpico/infer/custom_infer.h index 1dec5cb0..e94d76e3 100644 --- a/mindspore-lite/providers/dpico/infer/custom_infer.h +++ b/mindspore-lite/providers/dpico/infer/custom_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class CustomInterface : public kernel::KernelInterface { public: @@ -32,6 +32,6 @@ class CustomInterface : public kernel::KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_DPICO_INFER_CUSTOM_INFER_H_ diff --git a/mindspore-lite/providers/nnie/src/custom_allocator.h b/mindspore-lite/providers/nnie/src/custom_allocator.h index 444552e8..21be154a 100644 --- a/mindspore-lite/providers/nnie/src/custom_allocator.h +++ b/mindspore-lite/providers/nnie/src/custom_allocator.h @@ -28,7 +28,7 @@ #include "include/api/allocator.h" #include "include/hi_type.h" -namespace mindspore { +namespace mindspore::lite { namespace nnie { class CustomAllocator : public Allocator { public: @@ -42,6 +42,6 @@ class CustomAllocator : public Allocator { int IncRefCount(void 
*ptr, int ref_count) override { return 1; } }; } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_NNIE_SRC_CUSTOM_ALLOCATOR_H_ diff --git a/mindspore-lite/providers/nnie/src/custom_fp32.cc b/mindspore-lite/providers/nnie/src/custom_fp32.cc index 5ddebc36..97a7d3dc 100644 --- a/mindspore-lite/providers/nnie/src/custom_fp32.cc +++ b/mindspore-lite/providers/nnie/src/custom_fp32.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Custom; -namespace mindspore { +namespace mindspore::lite { namespace nnie { static std::shared_ptr kCustomAllocator = std::make_shared(); @@ -183,8 +183,8 @@ std::shared_ptr CustomCreateKernel(const std::vector< return kernel; } } // namespace nnie -} // namespace mindspore -namespace mindspore { +} // namespace mindspore::lite +namespace mindspore::lite { namespace registry { namespace { const auto kFloat32 = DataType::kNumberTypeFloat32; @@ -195,4 +195,4 @@ REGISTER_CUSTOM_KERNEL(CPU, NNIE, kFloat32, NNIE, nnie::CustomCreateKernel) REGISTER_CUSTOM_KERNEL(CPU, NNIE, kInt8, NNIE, nnie::CustomCreateKernel) REGISTER_CUSTOM_KERNEL(CPU, NNIE, kUint8, NNIE, nnie::CustomCreateKernel) } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/providers/nnie/src/custom_fp32.h b/mindspore-lite/providers/nnie/src/custom_fp32.h index 4e4acf4e..65d754dc 100644 --- a/mindspore-lite/providers/nnie/src/custom_fp32.h +++ b/mindspore-lite/providers/nnie/src/custom_fp32.h @@ -31,7 +31,7 @@ using mindspore::MSTensor; using mindspore::kernel::Kernel; -namespace mindspore { +namespace mindspore::lite { namespace nnie { class CustomCPUKernel : public Kernel { public: @@ -67,5 +67,5 @@ class CustomCPUKernel : public Kernel { std::vector> outputs_shapes_; }; } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_NNIE_SRC_CUSTOM_FP32_H_ diff --git a/mindspore-lite/providers/nnie/src/custom_infer.cc b/mindspore-lite/providers/nnie/src/custom_infer.cc index 08a58700..89ea0d4c 100644 --- a/mindspore-lite/providers/nnie/src/custom_infer.cc +++ b/mindspore-lite/providers/nnie/src/custom_infer.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Custom; -namespace mindspore { +namespace mindspore::lite { namespace nnie { std::shared_ptr CustomInferCreater() { auto infer = new (std::nothrow) CustomInterface(); @@ -158,9 +158,9 @@ Status CustomInterface::Infer(std::vector *inputs, std::vec return kSuccess; } } // namespace nnie -} // namespace mindspore -namespace mindspore { +} // namespace mindspore::lite +namespace mindspore::lite { namespace kernel { REGISTER_CUSTOM_KERNEL_INTERFACE(NNIE, NNIE, nnie::CustomInferCreater); } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/providers/nnie/src/custom_infer.h b/mindspore-lite/providers/nnie/src/custom_infer.h index ec9ddc0a..e2d383c5 100644 --- a/mindspore-lite/providers/nnie/src/custom_infer.h +++ b/mindspore-lite/providers/nnie/src/custom_infer.h @@ -19,7 +19,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace nnie { class CustomInterface : public mindspore::kernel::KernelInterface { public: @@ -31,5 +31,5 @@ class CustomInterface : public mindspore::kernel::KernelInterface { const mindspore::schema::Primitive 
*primitive) override; }; } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_NNIE_SRC_CUSTOM_INFER_H_ diff --git a/mindspore-lite/providers/nnie/src/nnie_cfg_parser.cc b/mindspore-lite/providers/nnie/src/nnie_cfg_parser.cc index d51744ff..a82c84e1 100644 --- a/mindspore-lite/providers/nnie/src/nnie_cfg_parser.cc +++ b/mindspore-lite/providers/nnie/src/nnie_cfg_parser.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace nnie { namespace { constexpr auto kTimeStep = "TimeStep"; @@ -114,4 +114,4 @@ int Flags::Init(const kernel::Kernel &kernel) { return RET_OK; } } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/providers/nnie/src/nnie_cfg_parser.h b/mindspore-lite/providers/nnie/src/nnie_cfg_parser.h index 92875bc1..e7fdd3c3 100644 --- a/mindspore-lite/providers/nnie/src/nnie_cfg_parser.h +++ b/mindspore-lite/providers/nnie/src/nnie_cfg_parser.h @@ -21,7 +21,7 @@ #include "include/api/kernel.h" #include "include/hi_type.h" -namespace mindspore { +namespace mindspore::lite { namespace nnie { typedef struct { HI_U64 phy_; @@ -53,5 +53,5 @@ class Flags { int ParserBool(const std::map &nnie_arg, const std::string key, bool *val); }; } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/providers/nnie/src/nnie_common.cc b/mindspore-lite/providers/nnie/src/nnie_common.cc index 135bd899..1f296bd0 100644 --- a/mindspore-lite/providers/nnie/src/nnie_common.cc +++ b/mindspore-lite/providers/nnie/src/nnie_common.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace nnie { constexpr int kSleepUs = 100; constexpr int kCompressionWidth = 2; @@ -969,4 +969,4 @@ int NnieCommRun(NnieRunCfg *nnie_run_cfg, bool run_box) { return RET_OK; } } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/providers/nnie/src/nnie_common.h b/mindspore-lite/providers/nnie/src/nnie_common.h index ddd6ddb2..ad9565e9 100644 --- a/mindspore-lite/providers/nnie/src/nnie_common.h +++ b/mindspore-lite/providers/nnie/src/nnie_common.h @@ -27,7 +27,7 @@ #include "include/ir/dtype/type_id.h" #include "src/nnie_cfg_parser.h" -namespace mindspore { +namespace mindspore::lite { namespace nnie { #define NNIE_ALIGN_16 16 #define NNIE_ALIGN16(u32Num) ((u32Num + NNIE_ALIGN_16 - 1) / NNIE_ALIGN_16 * NNIE_ALIGN_16) @@ -117,5 +117,5 @@ int NnieCommGetOutputData(NnieRunCfg *nnie_run_cfg, float *data, HI_U32 output_s HI_U32 GetBlobSize(const SVP_SRC_BLOB_S &blob); } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_NNIE_SRC_NNIE_COMMON_H_ diff --git a/mindspore-lite/providers/nnie/src/nnie_manager.cc b/mindspore-lite/providers/nnie/src/nnie_manager.cc index 48ae58f2..7564be5b 100644 --- a/mindspore-lite/providers/nnie/src/nnie_manager.cc +++ b/mindspore-lite/providers/nnie/src/nnie_manager.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace nnie { constexpr int kUINT16_MAX = 65535; constexpr int kNumInput2 = 2; @@ -545,4 +545,4 @@ int NNIEManager::FillData(std::vector *inputs, unsigned int return RET_OK; } } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite 
diff --git a/mindspore-lite/providers/nnie/src/nnie_manager.h b/mindspore-lite/providers/nnie/src/nnie_manager.h index b0fdc8ce..d8b39df5 100644 --- a/mindspore-lite/providers/nnie/src/nnie_manager.h +++ b/mindspore-lite/providers/nnie/src/nnie_manager.h @@ -26,7 +26,7 @@ #include "src/nnie_common.h" #include "src/nnie_cfg_parser.h" -namespace mindspore { +namespace mindspore::lite { namespace nnie { class NNIEManager { public: @@ -113,5 +113,5 @@ class NNIEManager { std::vector tensors_; }; } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_NNIE_SRC_NNIE_MANAGER_H_ diff --git a/mindspore-lite/providers/nnie/src/nnie_memory.cc b/mindspore-lite/providers/nnie/src/nnie_memory.cc index 4f80dacf..797fe7f5 100644 --- a/mindspore-lite/providers/nnie/src/nnie_memory.cc +++ b/mindspore-lite/providers/nnie/src/nnie_memory.cc @@ -18,7 +18,7 @@ #include "include/mpi_sys.h" #include "src/nnie_common.h" -namespace mindspore { +namespace mindspore::lite { namespace nnie { HI_S32 NnieMemMalloc(const HI_CHAR *mmb, HI_CHAR *zone, HI_U64 *pu_phy_addr, HI_VOID **ppv_vir_addr, HI_U32 size) { return HI_MPI_SYS_MmzAlloc(pu_phy_addr, ppv_vir_addr, mmb, zone, size); @@ -42,4 +42,4 @@ HI_S32 NnieGetVirMemInfo(HI_U64 pv_vir_addr, HI_U64 *phy_addr) { return ret; } } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/providers/nnie/src/nnie_memory.h b/mindspore-lite/providers/nnie/src/nnie_memory.h index 7c72499a..1230d678 100644 --- a/mindspore-lite/providers/nnie/src/nnie_memory.h +++ b/mindspore-lite/providers/nnie/src/nnie_memory.h @@ -27,7 +27,7 @@ #include "include/mpi_nnie.h" #include "include/mpi_sys.h" -namespace mindspore { +namespace mindspore::lite { namespace nnie { #define NNIE_MEM_FREE(phy, vir) \ do { \ @@ -46,5 +46,5 @@ HI_S32 NnieMemFlushCache(HI_U64 phy_addr, HI_VOID *pv_vir_addr, HI_U32 size); HI_S32 NnieGetVirMemInfo(HI_U64 pv_vir_addr, HI_U64 *phy_addr); } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_NNIE_SRC_NNIE_MEMORY_H_ diff --git a/mindspore-lite/providers/nnie/src/nnie_print.cc b/mindspore-lite/providers/nnie/src/nnie_print.cc index dc1d2c5b..435f341e 100644 --- a/mindspore-lite/providers/nnie/src/nnie_print.cc +++ b/mindspore-lite/providers/nnie/src/nnie_print.cc @@ -16,7 +16,7 @@ #include "src/nnie_print.h" -namespace mindspore { +namespace mindspore::lite { namespace nnie { HI_S32 NniePrintReportResult(NnieParam *pst_nnie_param) { HI_U32 u32seg_num = pst_nnie_param->model_->u32NetSegNum; @@ -173,4 +173,4 @@ HI_S32 NniePrintReportResultInputSeg(NnieParam *pst_nnie_param, int segnum) { return HI_SUCCESS; } } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/providers/nnie/src/nnie_print.h b/mindspore-lite/providers/nnie/src/nnie_print.h index 389716df..d1697ad3 100644 --- a/mindspore-lite/providers/nnie/src/nnie_print.h +++ b/mindspore-lite/providers/nnie/src/nnie_print.h @@ -48,11 +48,11 @@ constexpr int kMaxSize = 1024; constexpr int kDecimal = 10; -namespace mindspore { +namespace mindspore::lite { namespace nnie { HI_S32 NniePrintReportResult(NnieParam *pst_nnie_param); HI_S32 NniePrintReportResultInputSeg(NnieParam *pst_nnie_param, int segnum); } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_NNIE_SRC_NNIE_PRINT_H_ diff --git a/mindspore-lite/providers/nnie_proposal/src/proposal.cc 
b/mindspore-lite/providers/nnie_proposal/src/proposal.cc index 240558db..b0a4ce26 100644 --- a/mindspore-lite/providers/nnie_proposal/src/proposal.cc +++ b/mindspore-lite/providers/nnie_proposal/src/proposal.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace proposal { constexpr int kNumInput2 = 2; constexpr int kScoreSizeIndex = 2; @@ -619,4 +619,4 @@ void ProposalDeInit(ProposalParam *param) { } } } // namespace proposal -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/providers/nnie_proposal/src/proposal.h b/mindspore-lite/providers/nnie_proposal/src/proposal.h index 41b23c42..920cce1b 100644 --- a/mindspore-lite/providers/nnie_proposal/src/proposal.h +++ b/mindspore-lite/providers/nnie_proposal/src/proposal.h @@ -36,7 +36,7 @@ } \ } while (0) -namespace mindspore { +namespace mindspore::lite { namespace proposal { typedef struct { uint32_t stride_; @@ -89,5 +89,5 @@ int32_t ProposalInit(ProposalParam *param, uint32_t max_roi_num, uint32_t ori_im int32_t ProposalRun(ProposalParam *param); void ProposalDeInit(ProposalParam *param); } // namespace proposal -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_NNIE_PROPOSAL_SRC_PROPOSAL_H_ diff --git a/mindspore-lite/providers/nnie_proposal/src/proposal_fp32.cc b/mindspore-lite/providers/nnie_proposal/src/proposal_fp32.cc index c87b5ece..93d910fd 100644 --- a/mindspore-lite/providers/nnie_proposal/src/proposal_fp32.cc +++ b/mindspore-lite/providers/nnie_proposal/src/proposal_fp32.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Custom; -namespace mindspore { +namespace mindspore::lite { namespace proposal { constexpr int kMaxSize = 1024; constexpr int kNumInput2 = 2; @@ -224,13 +224,13 @@ std::shared_ptr ProposalCreateKernel(const std::vecto return kernel; } } // namespace proposal -} // namespace mindspore +} // namespace mindspore::lite -namespace mindspore { +namespace mindspore::lite { namespace kernel { namespace { const auto kFloat32 = DataType::kNumberTypeFloat32; } REGISTER_CUSTOM_KERNEL(CPU, NNIE, kFloat32, Proposal, proposal::ProposalCreateKernel) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/providers/nnie_proposal/src/proposal_fp32.h b/mindspore-lite/providers/nnie_proposal/src/proposal_fp32.h index e46d618e..5eb5e5be 100644 --- a/mindspore-lite/providers/nnie_proposal/src/proposal_fp32.h +++ b/mindspore-lite/providers/nnie_proposal/src/proposal_fp32.h @@ -23,7 +23,7 @@ #include "src/proposal.h" using mindspore::kernel::Kernel; -namespace mindspore { +namespace mindspore::lite { namespace proposal { class ProposalCPUKernel : public Kernel { public: @@ -45,6 +45,6 @@ class ProposalCPUKernel : public Kernel { int64_t image_weight_; }; } // namespace proposal -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_NNIE_PROPOSAL_SRC_PROPOSAL_FP32_H_ diff --git a/mindspore-lite/providers/nnie_proposal/src/proposal_infer.cc b/mindspore-lite/providers/nnie_proposal/src/proposal_infer.cc index 58e4a6d0..ce23cff9 100644 --- a/mindspore-lite/providers/nnie_proposal/src/proposal_infer.cc +++ b/mindspore-lite/providers/nnie_proposal/src/proposal_infer.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Custom; -namespace 
mindspore { +namespace mindspore::lite { namespace proposal { std::shared_ptr ProposalInferCreater() { auto infer = std::make_shared(); @@ -64,9 +64,9 @@ Status ProposalInterface::Infer(std::vector *inputs, std::v return kSuccess; } } // namespace proposal -} // namespace mindspore -namespace mindspore { +} // namespace mindspore::lite +namespace mindspore::lite { namespace kernel { REGISTER_CUSTOM_KERNEL_INTERFACE(NNIE, Proposal, proposal::ProposalInferCreater); } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/providers/nnie_proposal/src/proposal_infer.h b/mindspore-lite/providers/nnie_proposal/src/proposal_infer.h index fe81d8b7..1bd7351a 100644 --- a/mindspore-lite/providers/nnie_proposal/src/proposal_infer.h +++ b/mindspore-lite/providers/nnie_proposal/src/proposal_infer.h @@ -19,7 +19,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace proposal { class ProposalInterface : public mindspore::kernel::KernelInterface { public: @@ -31,5 +31,5 @@ class ProposalInterface : public mindspore::kernel::KernelInterface { const mindspore::schema::Primitive *primitive) override; }; } // namespace proposal -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_PROVIDERS_NNIE_PROPOSAL_SRC_PROPOSAL_INFER_H_ diff --git a/mindspore-lite/python/src/tensor_numpy_impl.h b/mindspore-lite/python/src/tensor_numpy_impl.h index f1a6b7f2..5dacc097 100644 --- a/mindspore-lite/python/src/tensor_numpy_impl.h +++ b/mindspore-lite/python/src/tensor_numpy_impl.h @@ -33,7 +33,7 @@ #endif namespace py = pybind11; -namespace mindspore { +namespace mindspore::lite { class TensorNumpyImpl : public MutableTensorImpl { public: TensorNumpyImpl(const std::string &name, py::buffer_info &&buffer, const std::vector &ms_shape) @@ -176,6 +176,6 @@ class TensorNumpyImpl : public MutableTensorImpl { std::string device_ = ""; int device_id_ = -1; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_UTILS_TENSOR_NUMPY_IMPL_H_ diff --git a/mindspore-lite/src/common/config_infos.cc b/mindspore-lite/src/common/config_infos.cc index a8c67946..391d45a0 100644 --- a/mindspore-lite/src/common/config_infos.cc +++ b/mindspore-lite/src/common/config_infos.cc @@ -21,7 +21,7 @@ #include "src/common/common.h" #include "src/common/utils.h" -namespace mindspore { +namespace mindspore::lite { bool ProfileParser::ParseRangeStr(const std::string &range_str, int64_t *min_ptr, int64_t *max_ptr) { if (min_ptr == nullptr || max_ptr == nullptr) { return false; @@ -327,4 +327,4 @@ std::string ProfileParser::GetOption(const std::map &c } return it->second; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/common/config_infos.h b/mindspore-lite/src/common/config_infos.h index a5c4996d..5a246086 100644 --- a/mindspore-lite/src/common/config_infos.h +++ b/mindspore-lite/src/common/config_infos.h @@ -23,7 +23,7 @@ #include "include/api/visible.h" #include "mindapi/base/shape_vector.h" -namespace mindspore { +namespace mindspore::lite { using ConfigInfos = std::map>; struct ProfileInputInfo { @@ -65,6 +65,6 @@ class MS_API ProfileParser { static bool ParseOptDimStr(const std::string &opt_dim_str, int64_t *opt_ptr); }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_COMMON_CONFIG_INFOS_H_ diff --git a/mindspore-lite/src/common/log.cc b/mindspore-lite/src/common/log.cc index 96202753..5f950111 
100644 --- a/mindspore-lite/src/common/log.cc +++ b/mindspore-lite/src/common/log.cc @@ -22,7 +22,7 @@ #endif // namespace to support utils module definition namespace mindspore constexpr const char *ANDROID_LOG_TAG = "MS_LITE"; -namespace mindspore { +namespace mindspore::lite { #if defined(__ANDROID__) constexpr const char *ANDROID_LOG_TAG = "MS_LITE"; #endif @@ -120,4 +120,4 @@ void LiteLogWriter::operator<(const LiteLogStream &stream) const noexcept { msg << stream.sstream_->rdbuf(); OutputLog(msg); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/common/log.h b/mindspore-lite/src/common/log.h index 5dae9ab8..2460eaee 100644 --- a/mindspore-lite/src/common/log.h +++ b/mindspore-lite/src/common/log.h @@ -40,7 +40,7 @@ static constexpr size_t GetRealPathPos() noexcept { : 0; } -namespace mindspore { +namespace mindspore::lite { #define LITE_FILE_NAME \ (sizeof(__FILE__) > GetRealPathPos() ? static_cast(__FILE__) + GetRealPathPos() \ : static_cast(__FILE__)) @@ -150,7 +150,7 @@ class LiteLogWriter { #define MS_LOG_INFO MSLOG_IF(mindspore::LiteLogLevel::INFO) #define MS_LOG_WARNING MSLOG_IF(mindspore::LiteLogLevel::WARNING) #define MS_LOG_ERROR MSLOG_IF(mindspore::LiteLogLevel::ERROR) -} // namespace mindspore +} // namespace mindspore::lite #ifdef Debug #include diff --git a/mindspore-lite/src/common/log_adapter.h b/mindspore-lite/src/common/log_adapter.h index 39bb69c2..a0727a9f 100644 --- a/mindspore-lite/src/common/log_adapter.h +++ b/mindspore-lite/src/common/log_adapter.h @@ -23,7 +23,7 @@ #endif // USE_GLOG #include "src/common/log_util.h" -namespace mindspore { +namespace mindspore::lite { const char *const unsupport_string_tensor_log = "This mindspore-lite library does not support string tensors. Set environment variable MSLITE_ENABLE_STRING_KERNEL " "to on to " @@ -60,6 +60,6 @@ static inline bool IsPrintDebug() { auto env = std::getenv("GLOG_v"); return env != nullptr && env[0] == '0'; } -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_COMMON_LOG_ADAPTER_H_ diff --git a/mindspore-lite/src/common/mutable_tensor_impl.h b/mindspore-lite/src/common/mutable_tensor_impl.h index d782b5b4..64b6c75d 100644 --- a/mindspore-lite/src/common/mutable_tensor_impl.h +++ b/mindspore-lite/src/common/mutable_tensor_impl.h @@ -22,7 +22,7 @@ #include #include "ir/api_tensor_impl.h" -namespace mindspore { +namespace mindspore::lite { class MutableTensorImpl : public MSTensor::Impl { public: virtual void SetName(const std::string &name) = 0; @@ -66,5 +66,5 @@ class MutableTensorImpl : public MSTensor::Impl { } }; using MutableTensorImplPtr = std::shared_ptr; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_COMMON_MUTABLE_TESNOR_IMPL_H_ diff --git a/mindspore-lite/src/control_flow/kernel/entrance_subgraph_kernel.cc b/mindspore-lite/src/control_flow/kernel/entrance_subgraph_kernel.cc index 6063ad96..690f6015 100644 --- a/mindspore-lite/src/control_flow/kernel/entrance_subgraph_kernel.cc +++ b/mindspore-lite/src/control_flow/kernel/entrance_subgraph_kernel.cc @@ -17,7 +17,7 @@ #include "src/control_flow/kernel/entrance_subgraph_kernel.h" #include "src/tensor.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int EntranceSubGraphKernel::Execute(const KernelCallBack &, const KernelCallBack &) { return lite::RET_OK; } SubGraphKernel *EntranceSubGraphKernel::Create(MSKernel *kernel) { @@ -27,4 +27,4 @@ SubGraphKernel *EntranceSubGraphKernel::Create(MSKernel *kernel) { } 
return sub_kernel; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/control_flow/kernel/entrance_subgraph_kernel.h b/mindspore-lite/src/control_flow/kernel/entrance_subgraph_kernel.h index f0804e2a..a19a3614 100644 --- a/mindspore-lite/src/control_flow/kernel/entrance_subgraph_kernel.h +++ b/mindspore-lite/src/control_flow/kernel/entrance_subgraph_kernel.h @@ -29,7 +29,7 @@ #include "src/litert/cpu_info.h" #include "src/executor/sub_graph_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class EntranceSubGraphKernel : public SubGraphKernel { public: explicit EntranceSubGraphKernel(MSKernel *kernel) : SubGraphKernel({}, {}, {}, kernel) { @@ -48,5 +48,5 @@ class EntranceSubGraphKernel : public SubGraphKernel { int ReSize() override { return RET_OK; }; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_CONTROL_FLOW_KERNEL_ENTRANCE_SUBGRAPH_KERNEL_H_ diff --git a/mindspore-lite/src/control_flow/kernel/exit_subgraph_kernel.cc b/mindspore-lite/src/control_flow/kernel/exit_subgraph_kernel.cc index d8718153..0f47b416 100644 --- a/mindspore-lite/src/control_flow/kernel/exit_subgraph_kernel.cc +++ b/mindspore-lite/src/control_flow/kernel/exit_subgraph_kernel.cc @@ -17,7 +17,7 @@ #include "src/control_flow/kernel/exit_subgraph_kernel.h" #include "src/tensor.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ExitSubGraphKernel::Execute(const KernelCallBack &, const KernelCallBack &) { return lite::RET_OK; } SubGraphKernel *ExitSubGraphKernel::Create(MSKernel *kernel) { @@ -29,4 +29,4 @@ SubGraphKernel *ExitSubGraphKernel::Create(MSKernel *kernel) { } void ExitSubGraphKernel::SetPartial(kernel::KernelExec *partial_node) { (void)partials_.insert(partial_node); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/control_flow/kernel/exit_subgraph_kernel.h b/mindspore-lite/src/control_flow/kernel/exit_subgraph_kernel.h index a3fb7464..029c9a94 100644 --- a/mindspore-lite/src/control_flow/kernel/exit_subgraph_kernel.h +++ b/mindspore-lite/src/control_flow/kernel/exit_subgraph_kernel.h @@ -30,7 +30,7 @@ #include "src/litert/cpu_info.h" #include "src/executor/sub_graph_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ExitSubGraphKernel : public SubGraphKernel { public: explicit ExitSubGraphKernel(MSKernel *kernel) : SubGraphKernel({}, {}, {}, kernel) { subgraph_type_ = kExitSubGraph; } @@ -55,5 +55,5 @@ class ExitSubGraphKernel : public SubGraphKernel { int schema_version_ = lite::SCHEMA_VERSION::SCHEMA_CUR; std::set partials_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_CONTROL_FLOW_KERNEL_EXIT_SUBGRAPH_KERNEL_H_ diff --git a/mindspore-lite/src/control_flow/kernel/identity_kernel.cc b/mindspore-lite/src/control_flow/kernel/identity_kernel.cc index 8c372adf..04d8bc2b 100644 --- a/mindspore-lite/src/control_flow/kernel/identity_kernel.cc +++ b/mindspore-lite/src/control_flow/kernel/identity_kernel.cc @@ -20,7 +20,7 @@ #include "src/common/tensor_util.h" #include "src/common/prim_inner.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int IdentityKernel::Run() { auto ret = lite::RET_OK; for (size_t i = 0; i < in_tensors().size(); ++i) { @@ -108,4 +108,4 @@ KernelExec *IdentityKernel::Create(std::vector in_tensors, std:: return nullptr; } } -} // namespace mindspore::kernel +} 
// namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/control_flow/kernel/identity_kernel.h b/mindspore-lite/src/control_flow/kernel/identity_kernel.h index 7f4ae027..2c077965 100644 --- a/mindspore-lite/src/control_flow/kernel/identity_kernel.h +++ b/mindspore-lite/src/control_flow/kernel/identity_kernel.h @@ -30,7 +30,7 @@ #include "src/litert/cpu_info.h" #include "src/executor/sub_graph_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { // Identity kernel is used to update a reference to a tensor. This is useful in control flow model. class IdentityKernel : public LiteKernel { public: @@ -57,5 +57,5 @@ class IdentityKernel : public LiteKernel { std::vector need_resize_{}; bool support_fp16_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_CONTROL_FLOW_KERNEL_IDENTITY_KERNEL_H_ diff --git a/mindspore-lite/src/executor/kernel_exec.cc b/mindspore-lite/src/executor/kernel_exec.cc index 4efe7046..f0c4fdd5 100644 --- a/mindspore-lite/src/executor/kernel_exec.cc +++ b/mindspore-lite/src/executor/kernel_exec.cc @@ -20,7 +20,7 @@ #include "src/common/utils.h" #include "src/common/version_manager.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; @@ -90,4 +90,4 @@ int KernelExec::DoExecute() { } return ret; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/executor/sub_graph_kernel.cc b/mindspore-lite/src/executor/sub_graph_kernel.cc index 5a0ad96c..535a3d7f 100644 --- a/mindspore-lite/src/executor/sub_graph_kernel.cc +++ b/mindspore-lite/src/executor/sub_graph_kernel.cc @@ -29,7 +29,7 @@ #include "src/common/utils.h" #include "src/litert/kernel_exec_util.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_ERR; using mindspore::lite::RET_INFER_INVALID; @@ -645,4 +645,4 @@ int AclSubGraph::Execute(const KernelCallBack &before, const KernelCallBack &aft } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/executor/sub_graph_kernel.h b/mindspore-lite/src/executor/sub_graph_kernel.h index 8f84a021..a3b7da91 100644 --- a/mindspore-lite/src/executor/sub_graph_kernel.h +++ b/mindspore-lite/src/executor/sub_graph_kernel.h @@ -32,7 +32,7 @@ #include "nnacl_c/constant_of_shape_parameter.h" #endif -namespace mindspore::kernel { +namespace mindspore::lite::kernel { // store origin data and allocator of input tensor of subgraph for PreProcess and PostProcess struct DataStore { void *data_ = nullptr; @@ -310,5 +310,5 @@ class AclSubGraph : public SubGraphKernel { int Execute() override { return Execute(nullptr, nullptr); } int Execute(const KernelCallBack &before, const KernelCallBack &after) override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_SUB_GRAPH_KERNEL_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/cell.cc b/mindspore-lite/src/extendrt/cxx_api/cell.cc index 18927cd1..b85712f3 100644 --- a/mindspore-lite/src/extendrt/cxx_api/cell.cc +++ b/mindspore-lite/src/extendrt/cxx_api/cell.cc @@ -17,7 +17,7 @@ #include "include/api/cell.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { std::vector CellBase::operator()(const std::vector &inputs) const { std::vector empty; MS_LOG(ERROR) << 
"Unsupported feature."; @@ -58,4 +58,4 @@ InputAndOutput::InputAndOutput(const std::shared_ptr &cell, const std: int32_t index) { MS_LOG(ERROR) << "Unsupported feature."; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/context.cc b/mindspore-lite/src/extendrt/cxx_api/context.cc index af4d2a05..6a4dd42f 100644 --- a/mindspore-lite/src/extendrt/cxx_api/context.cc +++ b/mindspore-lite/src/extendrt/cxx_api/context.cc @@ -23,7 +23,7 @@ #include "src/common/log_adapter.h" #include "src/extendrt/delegate_graph_executor.h" -namespace mindspore { +namespace mindspore::lite { constexpr auto kModelOptionCpuEnableFP16 = "mindspore.option.cpu.enable_fp16"; constexpr auto kModelOptionGPUEnableFP16 = "mindspore.option.gpu.enable_fp16"; constexpr auto kModelOptionNPUEnableFP16 = "mindspore.option.npu.enable_fp16"; @@ -539,4 +539,4 @@ int KirinNPUDeviceInfo::GetFrequency() const { MS_LOG(ERROR) << "Unsupported Feature."; return 0; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/context.h b/mindspore-lite/src/extendrt/cxx_api/context.h index aee463b9..85c2e38b 100644 --- a/mindspore-lite/src/extendrt/cxx_api/context.h +++ b/mindspore-lite/src/extendrt/cxx_api/context.h @@ -29,7 +29,7 @@ #include "include/api/context.h" #include "include/api/delegate_api.h" -namespace mindspore { +namespace mindspore::lite { struct Context::Data { std::vector> device_info_list; int affinity_mode_ = 0; @@ -47,6 +47,6 @@ struct DeviceInfoContext::Data { #endif std::shared_ptr allocator = nullptr; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_CXX_API_CONTEXT_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/dlutils.h b/mindspore-lite/src/extendrt/cxx_api/dlutils.h index dd495936..584190cd 100644 --- a/mindspore-lite/src/extendrt/cxx_api/dlutils.h +++ b/mindspore-lite/src/extendrt/cxx_api/dlutils.h @@ -26,7 +26,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { inline std::string FindFileWithRecursion(const std::string &parent_dir, const std::string &target_so, int depth = 0) { constexpr int MAX_RECURSION_DEPTH = 5; if (depth == MAX_RECURSION_DEPTH) { @@ -183,7 +183,7 @@ inline void DLSoClose(void *handle) { return __rc; \ } \ } while (false) -} // namespace mindspore +} // namespace mindspore::lite #else inline mindspore::Status FindSoPath(const std::string &benchmark_so_path, const std::string &target_so, std::string *target_so_path) { diff --git a/mindspore-lite/src/extendrt/cxx_api/file_utils.h b/mindspore-lite/src/extendrt/cxx_api/file_utils.h index e827cb64..3593450b 100644 --- a/mindspore-lite/src/extendrt/cxx_api/file_utils.h +++ b/mindspore-lite/src/extendrt/cxx_api/file_utils.h @@ -20,8 +20,8 @@ #include #include "include/api/types.h" -namespace mindspore { +namespace mindspore::lite { Buffer ReadFile(const std::string &file); std::vector ReadFileNames(const std::string &dir); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_CXX_API_FILE_UTILS_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/graph/graph.cc b/mindspore-lite/src/extendrt/cxx_api/graph/graph.cc index e6c33500..c4cd18b4 100644 --- a/mindspore-lite/src/extendrt/cxx_api/graph/graph.cc +++ b/mindspore-lite/src/extendrt/cxx_api/graph/graph.cc @@ -17,7 +17,7 @@ #include "extendrt/cxx_api/graph/graph_data.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { Graph::Graph() : 
graph_data_(nullptr) {} Graph::Graph(const std::shared_ptr &graph_data) : graph_data_(graph_data) {} @@ -36,4 +36,4 @@ ModelType Graph::ModelType() const { MS_EXCEPTION_IF_NULL(graph_data_); return graph_data_->ModelType(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/graph/graph_data.cc b/mindspore-lite/src/extendrt/cxx_api/graph/graph_data.cc index f3d1cffe..e9a6880d 100644 --- a/mindspore-lite/src/extendrt/cxx_api/graph/graph_data.cc +++ b/mindspore-lite/src/extendrt/cxx_api/graph/graph_data.cc @@ -16,7 +16,7 @@ #include "extendrt/cxx_api/graph/graph_data.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { Graph::GraphData::GraphData(const FuncGraphPtr &func_graph, mindspore::ModelType model_type) : func_graph_(nullptr), om_data_(), model_type_(ModelType::kUnknownType), data_graph_({}) { if (model_type != ModelType::kMindIR) { @@ -57,4 +57,4 @@ Buffer Graph::GraphData::GetOMData() const { void Graph::GraphData::SetPreprocess(const std::vector> &data_graph) { data_graph_ = data_graph; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/graph/graph_data.h b/mindspore-lite/src/extendrt/cxx_api/graph/graph_data.h index 1b0ad53a..bc162084 100644 --- a/mindspore-lite/src/extendrt/cxx_api/graph/graph_data.h +++ b/mindspore-lite/src/extendrt/cxx_api/graph/graph_data.h @@ -23,7 +23,7 @@ #include "include/api/types.h" #include "ir/func_graph.h" -namespace mindspore { +namespace mindspore::lite { namespace dataset { class Execute; } @@ -53,5 +53,5 @@ class Graph::GraphData { enum ModelType model_type_; std::vector> data_graph_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_CXX_API_GRAPH_GRAPH_DATA_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/graph/graph_impl.h b/mindspore-lite/src/extendrt/cxx_api/graph/graph_impl.h index f310e017..21c82f77 100644 --- a/mindspore-lite/src/extendrt/cxx_api/graph/graph_impl.h +++ b/mindspore-lite/src/extendrt/cxx_api/graph/graph_impl.h @@ -26,7 +26,7 @@ #include "extendrt/cxx_api/graph/graph_data.h" #include "include/common/utils/utils.h" -namespace mindspore { +namespace mindspore::lite { class GraphCell::GraphImpl { public: GraphImpl() : graph_(nullptr), graph_context_(nullptr) {} @@ -48,5 +48,5 @@ class GraphCell::GraphImpl { std::shared_ptr graph_; std::shared_ptr graph_context_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_CXX_API_GRAPH_GRAPH_IMPL_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/model/model.cc b/mindspore-lite/src/extendrt/cxx_api/model/model.cc index f7508972..cb92ac46 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model/model.cc +++ b/mindspore-lite/src/extendrt/cxx_api/model/model.cc @@ -23,7 +23,7 @@ #include "src/common/file_utils.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace { #ifdef USE_GLOG extern "C" { @@ -500,4 +500,4 @@ Status Model::Finalize() { } return impl_->Finalize(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/model/model_group.cc b/mindspore-lite/src/extendrt/cxx_api/model/model_group.cc index b6e5d7bc..3c3a1a42 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model/model_group.cc +++ b/mindspore-lite/src/extendrt/cxx_api/model/model_group.cc @@ -22,7 +22,7 @@ #include "src/extendrt/cxx_api/model/model_group_impl.h" #include "src/common/log_adapter.h" -namespace 
mindspore { +namespace mindspore::lite { ModelGroup::ModelGroup(ModelGroupFlag flags) { impl_ = std::make_shared(flags); if (impl_ == nullptr) { @@ -64,4 +64,4 @@ Status ModelGroup::CalMaxSizeOfWorkspace(ModelType model_type, const std::shared } return impl_->CalMaxSizeOfWorkspace(model_type, ms_context); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/model/model_group_impl.cc b/mindspore-lite/src/extendrt/cxx_api/model/model_group_impl.cc index e6082156..7868bc3f 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model/model_group_impl.cc +++ b/mindspore-lite/src/extendrt/cxx_api/model/model_group_impl.cc @@ -26,7 +26,7 @@ #include "extendrt/cxx_api/model/model_impl.h" #include "src/common/common.h" -namespace mindspore { +namespace mindspore::lite { ModelGroupImpl::ModelGroupImpl(ModelGroupFlag flags) : flags_(flags) { static uint32_t g_model_group_id = 0; model_group_id_ = ++g_model_group_id; @@ -132,4 +132,4 @@ Status ModelGroupImpl::CalMaxSizeOfWorkspace(ModelType model_type, const std::sh } return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/model/model_group_impl.h b/mindspore-lite/src/extendrt/cxx_api/model/model_group_impl.h index 94e86c9c..92b61f32 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model/model_group_impl.h +++ b/mindspore-lite/src/extendrt/cxx_api/model/model_group_impl.h @@ -28,7 +28,7 @@ #include "include/api/model_group.h" #include "include/api/context.h" -namespace mindspore { +namespace mindspore::lite { class ModelGroupImpl { public: explicit ModelGroupImpl(ModelGroupFlag flags); @@ -48,6 +48,6 @@ class ModelGroupImpl { ModelGroupFlag flags_; uint32_t model_group_id_ = 0; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_CXX_API_MODEL_MODEL_GROUP_IMPL_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/model/model_impl.cc b/mindspore-lite/src/extendrt/cxx_api/model/model_impl.cc index a8c6a2d1..14597f29 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model/model_impl.cc +++ b/mindspore-lite/src/extendrt/cxx_api/model/model_impl.cc @@ -48,7 +48,7 @@ #include "src/common/common.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { const char *const kExecutionPlan = "execution_plan"; constexpr size_t kMaxSectionNum = 100; @@ -1044,4 +1044,4 @@ Status ModelImpl::Finalize() { } return session_->Finalize(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/model/model_impl.h b/mindspore-lite/src/extendrt/cxx_api/model/model_impl.h index 86c56d95..3e33d776 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model/model_impl.h +++ b/mindspore-lite/src/extendrt/cxx_api/model/model_impl.h @@ -37,7 +37,7 @@ #ifndef _WIN32 #include #endif -namespace mindspore { +namespace mindspore::lite { class ConverterPlugin { public: typedef int (*ConverterFunc)(const mindspore::api::FuncGraphPtr &, const std::shared_ptr &, @@ -289,5 +289,5 @@ class ModelImpl { uint32_t graph_id_ = 0; std::map model_info_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_CXX_API_MODEL_MODEL_IMPL_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/model/multi_model_runner.cc b/mindspore-lite/src/extendrt/cxx_api/model/multi_model_runner.cc index 2e5cbcaa..90b964b1 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model/multi_model_runner.cc +++ 
b/mindspore-lite/src/extendrt/cxx_api/model/multi_model_runner.cc @@ -25,7 +25,7 @@ #include "mindspore/core/include/ir/graph_utils.h" #include "include/api/types.h" #include "src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.h" -namespace mindspore { +namespace mindspore::lite { namespace { std::mutex g_load_mindir_lock; std::mutex g_config_lock; @@ -397,4 +397,4 @@ std::vector ModelExecutor::GetOutputs() const { } return exec_outputs; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_parallel_runner.cc b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_parallel_runner.cc index 931736d0..26631b8b 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_parallel_runner.cc +++ b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_parallel_runner.cc @@ -20,7 +20,7 @@ #ifdef CAPTURE_SIGNALS #include "src/extendrt/signal_handler.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace { constexpr size_t kMaxSectionNum = 100; constexpr size_t kMaxConfigNumPerSection = 1000; @@ -197,4 +197,4 @@ Status ModelParallelRunner::Predict(const std::vector &inputs, std::ve } return model_parallel_runner_impl_->Predict(inputs, outputs, before, after); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_parallel_runner_impl.cc b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_parallel_runner_impl.cc index 4fcdace8..0784bf0b 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_parallel_runner_impl.cc +++ b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_parallel_runner_impl.cc @@ -21,7 +21,7 @@ #ifdef CAPTURE_SIGNALS #include "src/extendrt/signal_handler.h" #endif -namespace mindspore { +namespace mindspore::lite { Status ModelParallelRunnerImpl::Init(const std::string &model_path, const std::shared_ptr &runner_config) { std::unique_lock l(model_parallel_runner_impl_mutex_); @@ -118,4 +118,4 @@ ModelParallelRunnerImpl::~ModelParallelRunnerImpl() { } MS_LOG(INFO) << "delete model pool done."; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_parallel_runner_impl.h b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_parallel_runner_impl.h index 7bb53b2d..3f4bad0d 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_parallel_runner_impl.h +++ b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_parallel_runner_impl.h @@ -22,7 +22,7 @@ #include #include "src/extendrt/cxx_api/model_pool/model_pool.h" #include "include/api/context.h" -namespace mindspore { +namespace mindspore::lite { class ModelParallelRunnerImpl { public: ModelParallelRunnerImpl() = default; @@ -43,5 +43,5 @@ class ModelParallelRunnerImpl { ModelPool *model_pool_ = nullptr; std::shared_mutex model_parallel_runner_impl_mutex_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_CXX_API_MODEL_POOL_MODEL_PARALLEL_RUNNER_IMPL_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_pool.cc b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_pool.cc index cde73b54..5ece4731 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_pool.cc +++ b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_pool.cc @@ -31,7 +31,7 @@ #include "thread/parallel_thread_pool_manager.h" #endif #include "src/common/config_file.h" -namespace mindspore { +namespace mindspore::lite { namespace { 
constexpr int kNumDeviceInfo = 2; constexpr int kNumIndex = 2; @@ -1229,4 +1229,4 @@ Status ModelPool::ParseDeviceIds(const std::shared_ptr &runner_con } return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_pool.h b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_pool.h index fe1374e6..0170a1bb 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_pool.h +++ b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_pool.h @@ -30,7 +30,7 @@ #include "include/api/model_parallel_runner.h" #include "src/extendrt/cxx_api/model_pool/model_worker.h" #include "src/extendrt/cxx_api/model_pool/predict_task_queue.h" -namespace mindspore { +namespace mindspore::lite { using ModelPoolConfig = std::vector>; struct TensorInfo { @@ -175,5 +175,5 @@ class ModelPool { std::shared_ptr allocator_ = nullptr; std::vector threads_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_CXX_API_MODEL_POOL_MODEL_POOL_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_worker.cc b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_worker.cc index abed1765..ab2ccac5 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_worker.cc +++ b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_worker.cc @@ -19,7 +19,7 @@ #include "src/extendrt/numa_adapter.h" #include "src/common/common.h" #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { void ModelWorker::PrintWorkerInfo() { MS_LOG(ERROR) << "worker id: " << worker_config_->worker_id << " | bind core mode: " << worker_config_->context->GetThreadAffinityMode() @@ -275,4 +275,4 @@ Status ModelWorker::Predict(const std::vector &inputs, std::vectorActiveTaskQueue(); return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_worker.h b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_worker.h index be9ad0fa..24d0b2cf 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model_pool/model_worker.h +++ b/mindspore-lite/src/extendrt/cxx_api/model_pool/model_worker.h @@ -26,7 +26,7 @@ #include #include "include/api/model.h" #include "src/extendrt/cxx_api/model_pool/predict_task_queue.h" -namespace mindspore { +namespace mindspore::lite { class PredictTaskQueue; struct WorkerConfig { @@ -92,5 +92,5 @@ class ModelWorker { bool model_is_nullptr_ = false; int worker_id_ = -1; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_CXX_API_MODEL_POOL_MODEL_WORKER_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/model_pool/predict_task_queue.cc b/mindspore-lite/src/extendrt/cxx_api/model_pool/predict_task_queue.cc index 2205b7ac..3abfd9ac 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model_pool/predict_task_queue.cc +++ b/mindspore-lite/src/extendrt/cxx_api/model_pool/predict_task_queue.cc @@ -16,7 +16,7 @@ #include "src/extendrt/cxx_api/model_pool/predict_task_queue.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { PredictTaskQueue::~PredictTaskQueue() { MS_LOG(INFO) << "free predict task queue."; if (predict_task_ != nullptr) { @@ -133,4 +133,4 @@ PredictTask *PredictTaskQueue::GetPredictTask(int node_id, ModelWorker *worker) return predict_task; #endif } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/model_pool/predict_task_queue.h 
b/mindspore-lite/src/extendrt/cxx_api/model_pool/predict_task_queue.h index 26fd61d6..81106319 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model_pool/predict_task_queue.h +++ b/mindspore-lite/src/extendrt/cxx_api/model_pool/predict_task_queue.h @@ -28,7 +28,7 @@ #ifndef USE_HQUEUE #define USE_HQUEUE #endif -namespace mindspore { +namespace mindspore::lite { class ModelWorker; struct PredictTask { PredictTask(const std::vector *in = nullptr, std::vector *out = nullptr, @@ -75,5 +75,5 @@ class PredictTaskQueue { std::condition_variable task_push_cond_; bool predict_task_done_ = false; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_CXX_API_MODEL_POOL_PREDICT_TASK_QUEUE_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/model_pool/resource_manager.cc b/mindspore-lite/src/extendrt/cxx_api/model_pool/resource_manager.cc index a22224fd..d79948b3 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model_pool/resource_manager.cc +++ b/mindspore-lite/src/extendrt/cxx_api/model_pool/resource_manager.cc @@ -27,7 +27,7 @@ namespace { constexpr int kNumIndex = 2; } -namespace mindspore { +namespace mindspore::lite { ResourceManager *ResourceManager::GetInstance() { static ResourceManager instance; return &instance; @@ -309,4 +309,4 @@ InitWorkerManager::~InitWorkerManager() { } MS_LOG(INFO) << "~InitWorkerManager() end."; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/cxx_api/model_pool/resource_manager.h b/mindspore-lite/src/extendrt/cxx_api/model_pool/resource_manager.h index 3ddde36a..f63c23ae 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model_pool/resource_manager.h +++ b/mindspore-lite/src/extendrt/cxx_api/model_pool/resource_manager.h @@ -25,7 +25,7 @@ #include "src/common/log_adapter.h" #include "include/api/status.h" #include "src/extendrt/cxx_api/model_pool/model_worker.h" -namespace mindspore { +namespace mindspore::lite { class ResourceManager { public: static ResourceManager *GetInstance(); @@ -101,5 +101,5 @@ class InitWorkerManager { // numa id <=> reuse worker init thread std::unordered_map>> all_init_worker_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_MODEL_POOL_RESOURCE_MANAGER_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/model_pool/runner_config.h b/mindspore-lite/src/extendrt/cxx_api/model_pool/runner_config.h index beaeaf5d..80f7b276 100644 --- a/mindspore-lite/src/extendrt/cxx_api/model_pool/runner_config.h +++ b/mindspore-lite/src/extendrt/cxx_api/model_pool/runner_config.h @@ -20,7 +20,7 @@ #include #include #include "include/api/model_parallel_runner.h" -namespace mindspore { +namespace mindspore::lite { struct RunnerConfig::Data { int workers_num = 0; std::shared_ptr context = nullptr; @@ -28,5 +28,5 @@ struct RunnerConfig::Data { std::string config_path = ""; std::vector device_ids; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_CXX_API_MODEL_POOL_RUNNER_CONFIG_H_ diff --git a/mindspore-lite/src/extendrt/cxx_api/serialization.cc b/mindspore-lite/src/extendrt/cxx_api/serialization.cc index d436d726..09b017bf 100644 --- a/mindspore-lite/src/extendrt/cxx_api/serialization.cc +++ b/mindspore-lite/src/extendrt/cxx_api/serialization.cc @@ -26,7 +26,7 @@ #include "extendrt/cxx_api/file_utils.h" #include "src/common/crypto.h" -namespace mindspore { +namespace mindspore::lite { static Status RealPath(const std::string &file, std::string *realpath_str) { 
MS_EXCEPTION_IF_NULL(realpath_str); char real_path_mem[PATH_MAX] = {0}; @@ -377,4 +377,4 @@ Status Serialization::ExportWeightsCollaborateWithMicro(const Model &, ModelType MS_LOG(ERROR) << "Unsupported feature."; return kMEFailed; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_allocator.cc b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_allocator.cc index 0486ffc7..221ebf0a 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_allocator.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_allocator.cc @@ -22,7 +22,7 @@ #include "plugin/ascend/res_manager/symbol_interface/acl_symbol.h" #include "extendrt/delegate/ascend_acl/acl_env_guard.h" -namespace mindspore { +namespace mindspore::lite { AclAllocator *CreateAclAllocator() { MS_LOG(INFO) << "LoadAscendApiSymbols for MindSpore lite."; device::ascend::LoadAscendApiSymbols(); @@ -301,4 +301,4 @@ Status AclAllocator::Finalize() { } return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_allocator.h b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_allocator.h index 7417b0e2..6bc7c426 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_allocator.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_allocator.h @@ -24,7 +24,7 @@ #include "include/api/status.h" #include "src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.h" -namespace mindspore { +namespace mindspore::lite { class AclAllocator : public AscendAllocatorPluginImpl { public: AclAllocator() = default; @@ -58,5 +58,5 @@ class AclAllocator : public AscendAllocatorPluginImpl { extern "C" MS_API AclAllocator *CreateAclAllocator(); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_KERNEL_ASCEND_ACL_ALLOCATOR_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_env_guard.cc b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_env_guard.cc index a4633d7c..c4da0979 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_env_guard.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_env_guard.cc @@ -20,7 +20,7 @@ #include "plugin/ascend/res_manager/symbol_interface/acl_symbol.h" #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" #include "plugin/ascend/res_manager/symbol_interface/acl_rt_symbol.h" -namespace mindspore { +namespace mindspore::lite { std::shared_ptr AclEnvGuard::global_acl_env_ = nullptr; std::vector> AclEnvGuard::model_infers_ = {}; std::mutex AclEnvGuard::global_acl_env_mutex_; @@ -208,4 +208,4 @@ int32_t AclEnvGuard::GetModelNum() { return model_infers_.size(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_env_guard.h b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_env_guard.h index 3de39b3d..31cc2d5a 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_env_guard.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_env_guard.h @@ -23,7 +23,7 @@ #include "acl/acl_base.h" #include "include/api/visible.h" -namespace mindspore { +namespace mindspore::lite { class AclInitAdapter { public: static AclInitAdapter &GetInstance(); @@ -64,6 +64,6 @@ class AclEnvGuard { aclError errno_; }; extern "C" MS_API bool GetPid(int32_t *pid); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_KERNEL_ASCEND_MODEL_ACL_ENV_GUARD_H_ diff 
--git a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_graph_executor.cc b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_graph_executor.cc index fc71804d..fedeee52 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_graph_executor.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_graph_executor.cc @@ -29,7 +29,7 @@ #include "src/common/utils.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr auto kProviderAcl = "litert"; constexpr size_t kSupportedWeightNum = 1; @@ -382,4 +382,4 @@ static std::shared_ptr AclGraphExecutorCreator(const std::sha } REG_DELEGATE(kAscend, kProviderAcl, AclGraphExecutorCreator) -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_graph_executor.h b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_graph_executor.h index c7c4023c..7d9c4e6f 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_graph_executor.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_graph_executor.h @@ -29,7 +29,7 @@ #include "src/common/common.h" #include "extendrt/delegate/ascend_acl/model_infer.h" -namespace mindspore { +namespace mindspore::lite { class AclGraphExecutor : public LiteGraphExecutor { public: AclGraphExecutor(const std::shared_ptr &context, const ConfigInfos &config_info) { @@ -75,5 +75,5 @@ class AclGraphExecutor : public LiteGraphExecutor { bool load_model_ = false; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_ASCEND_ACL_ACL_GRAPH_EXECUTOR_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_mem_manager.cc b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_mem_manager.cc index d0585abd..d5f7831d 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_mem_manager.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_mem_manager.cc @@ -24,7 +24,7 @@ #include "plugin/ascend/res_manager/symbol_interface/acl_rt_symbol.h" #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" -namespace mindspore { +namespace mindspore::lite { STATUS AclMemManager::UpdateWorkspace(size_t work_size, size_t weight_size, int32_t device_id) { auto it = work_mem_info_map_.find(device_id); if (it == work_mem_info_map_.end()) { @@ -268,4 +268,4 @@ AclMemManager::~AclMemManager() { } } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_mem_manager.h b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_mem_manager.h index 25a928cc..f962a254 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_mem_manager.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_mem_manager.h @@ -26,7 +26,7 @@ #include #include "include/errorcode.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::STATUS; struct AclModelMemInfo { @@ -74,5 +74,5 @@ class AclMemManager { std::map> weight_mem_info_map_; AclModelMemInfo weight_mem_info_ = {nullptr, 0}; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ASCEND_SRC_ACL_MEM_MANAGER_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_model_options.h b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_model_options.h index e7eee260..e75134fe 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_model_options.h +++ 
b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_model_options.h @@ -25,7 +25,7 @@ #include "mindapi/base/format.h" #include "acl/acl_mdl.h" -namespace mindspore { +namespace mindspore::lite { struct AclModelOptions { int32_t device_id; std::string dump_path; @@ -52,5 +52,5 @@ struct AclDynamicShapeOptions { std::vector> input_shapes; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_KERNEL_ASCEND_SRC_ACL_MODEL_OPTIONS_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_plugin_impl.cc b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_plugin_impl.cc index db07867c..45c64c0c 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_plugin_impl.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_plugin_impl.cc @@ -17,7 +17,7 @@ #include #include "extendrt/delegate/ascend_acl/acl_plugin_impl.h" -namespace mindspore { +namespace mindspore::lite { std::shared_ptr AscendAclExecutorPluginImpl::InitAclGraphExecutor( const std::shared_ptr &context, const ConfigInfos &config_infos) { if (context == nullptr) { @@ -37,4 +37,4 @@ std::shared_ptr AscendAclExecutorPluginImpl::InitAclGraphExecu } AscendAclExecutorPluginImpl *CreateAscendAclExecutorPluginImpl() { return new AscendAclExecutorPluginImpl(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_plugin_impl.h b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_plugin_impl.h index 09afe610..3ad525df 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_plugin_impl.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/acl_plugin_impl.h @@ -22,7 +22,7 @@ #include "extendrt/delegate/plugin/ascend_acl_executor_plugin.h" #include "extendrt/delegate/ascend_acl/acl_graph_executor.h" -namespace mindspore { +namespace mindspore::lite { class AscendAclExecutorPluginImpl : public lite::AscendAclExecutorPluginImplBase { public: AscendAclExecutorPluginImpl() = default; @@ -34,5 +34,5 @@ class AscendAclExecutorPluginImpl : public lite::AscendAclExecutorPluginImplBase }; extern "C" MS_API AscendAclExecutorPluginImpl *CreateAscendAclExecutorPluginImpl(); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_ASCEND_ACL_ACL_PLUGIN_IMPL_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.cc b/mindspore-lite/src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.cc index 9c1a4ac0..473deb61 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.cc @@ -20,7 +20,7 @@ #include "src/extendrt/cxx_api/dlutils.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace { constexpr auto kAscendkernelPluginSoNmae = "libascend_acl_plugin.so"; constexpr auto kFunCreateAscendAllocatorPluginImpl = "CreateAclAllocator"; @@ -270,4 +270,4 @@ Status AscendAllocatorPlugin::Finalize() { return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.h b/mindspore-lite/src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.h index e2cd99ed..1ecf2458 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.h @@ -19,7 +19,7 @@ #include #include #include "include/api/status.h" -namespace mindspore { +namespace 
mindspore::lite { class AscendAllocatorPluginImpl { public: AscendAllocatorPluginImpl() = default; @@ -65,5 +65,5 @@ class MS_API AscendAllocatorPlugin { std::shared_ptr ascend_allocator_plugin_impl_ = nullptr; void *get_pid_func_ = nullptr; }; -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/dyn_shape_process.cc b/mindspore-lite/src/extendrt/delegate/ascend_acl/dyn_shape_process.cc index d9057240..9a57b08b 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/dyn_shape_process.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/dyn_shape_process.cc @@ -20,7 +20,7 @@ #include "include/errorcode.h" #include "src/common/log.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr auto kInputDimNum = 4; constexpr auto kNHWCNIdx = 0; @@ -286,4 +286,4 @@ bool DynShapeProcess::GetRealImageSize(const std::vector &new_shape MS_LOG(INFO) << "Current height " << height << " width " << width; return true; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/dyn_shape_process.h b/mindspore-lite/src/extendrt/delegate/ascend_acl/dyn_shape_process.h index d623ab3c..c3570f80 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/dyn_shape_process.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/dyn_shape_process.h @@ -29,7 +29,7 @@ using ShapeValueDType = int64_t; using ShapeVector = std::vector; using ShapeArray = std::vector; -namespace mindspore { +namespace mindspore::lite { class DynShapeProcess { public: bool Init(const AclDynamicShapeOptions &options); @@ -49,5 +49,5 @@ class DynShapeProcess { size_t input_data_idx_ = 0; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_KERNEL_ASCEND_MODEL_DYN_SHAPE_PROCESS_H diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/model_infer.cc b/mindspore-lite/src/extendrt/delegate/ascend_acl/model_infer.cc index 31fa1965..a6e6563e 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/model_infer.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/model_infer.cc @@ -19,7 +19,7 @@ #include "plugin/ascend/res_manager/symbol_interface/acl_rt_symbol.h" #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { std::mutex g_context_mutex; @@ -255,4 +255,4 @@ bool ModelInfer::Resize(const std::vector> &new_shapes) { } return true; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/model_infer.h b/mindspore-lite/src/extendrt/delegate/ascend_acl/model_infer.h index e119ab15..afea6173 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/model_infer.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/model_infer.h @@ -30,7 +30,7 @@ #include "extendrt/delegate/ascend_acl/acl_model_options.h" #include "extendrt/delegate/ascend_acl/profiling.h" #include "mindspore/core/include/mindapi/base/type_id.h" -namespace mindspore { +namespace mindspore::lite { class ModelInfer { public: @@ -63,5 +63,5 @@ class ModelInfer { std::shared_ptr acl_env_; uint64_t sharable_handle_ = 0; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_KERNEL_ASCEND_MODEL_MODEL_INFER_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/model_process.cc b/mindspore-lite/src/extendrt/delegate/ascend_acl/model_process.cc index e02358b3..b7dccec0 
100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/model_process.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/model_process.cc @@ -31,7 +31,7 @@ #include "plugin/ascend/res_manager/symbol_interface/acl_symbol.h" #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr size_t kBatchSizeNum = 1; constexpr size_t kImageSizeHwNum = 2; @@ -1817,4 +1817,4 @@ Status ModelProcess::GetOutputs(const std::vector *outputs) { const_cast *>(outputs)->insert(outputs->end(), new_outputs.begin(), new_outputs.end()); return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/model_process.h b/mindspore-lite/src/extendrt/delegate/ascend_acl/model_process.h index c05e609b..9c41f87c 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/model_process.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/model_process.h @@ -36,7 +36,7 @@ #include "mindspore/core/include/mindapi/base/type_id.h" #include "src/extendrt/delegate/ascend_acl/acl_allocator.h" -namespace mindspore { +namespace mindspore::lite { struct AclTensorInfo { void *cur_device_data; void *device_data; @@ -164,5 +164,5 @@ class ModelProcess { uint64_t sharable_handle_ = 0; AclAllocator *allocator_ = nullptr; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_KERNEL_ASCEND_MODEL_MODEL_PROCESS_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/profiling.cc b/mindspore-lite/src/extendrt/delegate/ascend_acl/profiling.cc index 1f32eca1..cde8797d 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/profiling.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/profiling.cc @@ -23,7 +23,7 @@ #include "plugin/ascend/res_manager/symbol_interface/acl_prof_symbol.h" #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { std::map kAicMetrics{{"ArithmeticUtilization", ACL_AICORE_ARITHMETIC_UTILIZATION}, {"PipeUtilization", ACL_AICORE_PIPE_UTILIZATION}, @@ -149,4 +149,4 @@ bool Profiling::StopProfiling(const aclrtStream &stream) { } return true; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_acl/profiling.h b/mindspore-lite/src/extendrt/delegate/ascend_acl/profiling.h index b18226c2..ddb8a5d3 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_acl/profiling.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_acl/profiling.h @@ -21,7 +21,7 @@ #include #include "acl/acl_prof.h" -namespace mindspore { +namespace mindspore::lite { class Profiling { public: @@ -42,5 +42,5 @@ class Profiling { nlohmann::json profiling_json_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_KERNEL_ASCEND_PROFILING_ASCEND_PROFILING_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/aoe_api_tune_process.cc b/mindspore-lite/src/extendrt/delegate/ascend_ge/aoe_api_tune_process.cc index d52e5259..701ab98e 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/aoe_api_tune_process.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/aoe_api_tune_process.cc @@ -26,7 +26,7 @@ #include "mindspore/ccsrc/utils/dlopen_macro.h" #include "cxx_api/acl_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr const char *kSubgraphTurning = "subgraph tuning"; constexpr const char 
*kOperatorTurning = "operator tuning"; @@ -474,4 +474,4 @@ Status AoeApiTuning::AoeTurningGraph(const std::shared_ptr &session } return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/aoe_api_tune_process.h b/mindspore-lite/src/extendrt/delegate/ascend_ge/aoe_api_tune_process.h index 03e6c278..919e1b0f 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/aoe_api_tune_process.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/aoe_api_tune_process.h @@ -26,7 +26,7 @@ #include "cxx_api/model/acl/acl_model_options.h" #include "ge/ge_api.h" -namespace mindspore { +namespace mindspore::lite { using ConfigInfos = std::map>; class AoeApiTuning { public: @@ -46,5 +46,5 @@ class AoeApiTuning { const std::map &global_options, const std::map &tuning_options); }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_ASCEND_GE_AOE_API_TUNING_PROCESS_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_context_manager.cc b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_context_manager.cc index c6abaa81..8c6cea07 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_context_manager.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_context_manager.cc @@ -19,7 +19,7 @@ #include "plugin/ascend/res_manager/symbol_interface/acl_rt_symbol.h" #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" -namespace mindspore { +namespace mindspore::lite { GeContextManager::GeContextManager() {} GeContextManager::~GeContextManager() { DestroyContext(); } @@ -118,4 +118,4 @@ void GeContextManager::DestroyDefaultStream() { } default_stream_ = nullptr; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_context_manager.h b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_context_manager.h index 79db5cf1..24126499 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_context_manager.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_context_manager.h @@ -21,7 +21,7 @@ #include #include "acl/acl_rt.h" -namespace mindspore { +namespace mindspore::lite { class GeContextManager { public: GeContextManager(); @@ -43,5 +43,5 @@ class GeContextManager { void DestroyDefaultStream(); bool CreateDefaultStream(); }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_ASCEND_GE_GE_CONTEXT_MANAGER_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_device_context.cc b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_device_context.cc index e9fdb12d..5debc212 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_device_context.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_device_context.cc @@ -30,7 +30,7 @@ #include "plugin/ascend/res_manager/symbol_interface/acl_rt_symbol.h" #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" -namespace mindspore { +namespace mindspore::lite { constexpr auto kHcclPluginFileName = "libhccl.so"; typedef enum { @@ -418,4 +418,4 @@ bool GeDeviceContext::FinalizeGe(const std::shared_ptr &inst_context) } return true; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_device_context.h b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_device_context.h index 620b518f..2f514d12 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_device_context.h +++ 
b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_device_context.h @@ -25,7 +25,7 @@ #include "include/api/status.h" #include "utils/ms_context.h" -namespace mindspore { +namespace mindspore::lite { class GeDeviceContext { public: GeDeviceContext(); @@ -56,5 +56,5 @@ class GeDeviceContext { static std::weak_ptr global_ge_context_; static std::mutex global_ge_context_mutex_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_ASCEND_GE_GE_DEVICE_CONTEXT_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_dynamic_utils.cc b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_dynamic_utils.cc index bf6ee376..7df1e5a8 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_dynamic_utils.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_dynamic_utils.cc @@ -18,7 +18,7 @@ #include "common/common.h" #include "extendrt/delegate/ascend_ge/ge_utils.h" -namespace mindspore { +namespace mindspore::lite { bool GeDynamicUtils::IsDynamicInputShapes(const std::vector &input_shapes) { return std::any_of(input_shapes.begin(), input_shapes.end(), [](const ShapeVector &shape) { return std::any_of(shape.begin(), shape.end(), [](auto dim) { return dim < 0; }); @@ -462,4 +462,4 @@ bool GeDynamicUtils::GetGraphOneRealShapes(const std::shared_ptr &context, const ConfigInfos &config_infos, std::vector> *dynamic_dims); }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_ASCEND_GE_GE_DYNAMIC_UTILS_H diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_graph_executor.cc b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_graph_executor.cc index bdeb4596..e4af132b 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_graph_executor.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_graph_executor.cc @@ -42,7 +42,7 @@ #include "utils/ms_utils_secure.h" #include "src/extendrt/utils/tensor_default_impl.h" #include "mindspore/core/include/utils/misc.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr auto kProviderGe = "ge"; constexpr auto kDump = "dump"; @@ -1917,4 +1917,4 @@ static std::shared_ptr GeGraphExecutorCreator(const std::shar } REG_DELEGATE(kAscend, kProviderGe, GeGraphExecutorCreator) -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_graph_executor.h b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_graph_executor.h index f91ed28e..d3e39191 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_graph_executor.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_graph_executor.h @@ -34,7 +34,7 @@ #include "extendrt/delegate/ascend_ge/ge_context_manager.h" #include "src/common/common.h" -namespace mindspore { +namespace mindspore::lite { using MSTensorPtr = std::shared_ptr; class MSTensorRel { @@ -247,5 +247,5 @@ class GeSessionManager { static std::map> ge_session_map_; static std::mutex session_mutex_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_ASCEND_GE_GE_GRAPH_EXECUTOR_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_memory_manager.cc b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_memory_manager.cc index 4f3dbb93..9f96a98f 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_memory_manager.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_memory_manager.cc @@ -20,7 +20,7 @@ #include 
"plugin/ascend/res_manager/symbol_interface/acl_rt_symbol.h" #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" -namespace mindspore { +namespace mindspore::lite { size_t ALIGN_OFFSET(void *addr) { auto extra = (reinterpret_cast(addr) & 0xff); if (extra == 0) { @@ -158,4 +158,4 @@ void GeMemoryManager::FreeAllMemory() { } host_memories_.clear(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_memory_manager.h b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_memory_manager.h index fdbc85ab..4c13f5b5 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_memory_manager.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_memory_manager.h @@ -20,7 +20,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { class GeMemoryManager { public: @@ -52,5 +52,5 @@ class GeMemoryManager { std::vector device_memories_; std::vector host_memories_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_ASCEND_GE_GE_MEMORY_MANAGER_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_plugin_impl.cc b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_plugin_impl.cc index 45527559..f1a02c45 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_plugin_impl.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_plugin_impl.cc @@ -19,7 +19,7 @@ #include "extendrt/delegate/ascend_ge/ge_device_context.h" #include "extendrt/delegate/ascend_ge/ge_utils.h" -namespace mindspore { +namespace mindspore::lite { Status AscendGeExecutorPluginImpl::AdaptGraph(FuncGraphPtr graph) const { return GeUtils::AdaptGraph(graph); } bool AscendGeExecutorPluginImpl::AoeTuning(const FuncGraphPtr &graph, @@ -62,4 +62,4 @@ std::shared_ptr AscendGeExecutorPluginImpl::InitGeGraphExecutor } AscendGeExecutorPluginImpl *CreateAscendGeExecutorPluginImpl() { return new AscendGeExecutorPluginImpl(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_plugin_impl.h b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_plugin_impl.h index bb2014fc..523c8bc2 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_plugin_impl.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_plugin_impl.h @@ -23,7 +23,7 @@ #include "extendrt/delegate/ascend_ge/ge_device_context.h" #include "extendrt/delegate/ascend_ge/ge_graph_executor.h" -namespace mindspore { +namespace mindspore::lite { class AscendGeExecutorPluginImpl : public lite::AscendGeExecutorPluginImplBase { public: AscendGeExecutorPluginImpl() = default; @@ -41,5 +41,5 @@ class AscendGeExecutorPluginImpl : public lite::AscendGeExecutorPluginImplBase { }; extern "C" MS_API AscendGeExecutorPluginImpl *CreateAscendGeExecutorPluginImpl(); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_ASCEND_GE_GE_PLUGIN_IMPL_H_ diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_utils.cc b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_utils.cc index 2372d346..ed85e71f 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_utils.cc +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_utils.cc @@ -27,7 +27,7 @@ #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { static std::string AdjustCnodeName(const PrimitivePtr &prim) { if (prim == nullptr) { 
MS_LOG(ERROR) << "prim is nullptr."; @@ -144,4 +144,4 @@ std::string GetSocVersion() { return version; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_utils.h b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_utils.h index 0d643489..82f76662 100644 --- a/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_utils.h +++ b/mindspore-lite/src/extendrt/delegate/ascend_ge/ge_utils.h @@ -26,7 +26,7 @@ #include "runtime/hardware_abstract/device_context/device_context.h" #include "extendrt/session/lite_graph_executor.h" #include "extendrt/delegate/ascend_ge/ge_device_context.h" -namespace mindspore { +namespace mindspore::lite { class GeUtils { public: static Status AdaptGraph(const FuncGraphPtr &func_graph); @@ -34,5 +34,5 @@ class GeUtils { }; std::string GetSocVersion(); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_ASCEND_GE_GE_UTILS_H_ diff --git a/mindspore-lite/src/extendrt/delegate/factory.cc b/mindspore-lite/src/extendrt/delegate/factory.cc index fb5b3780..d9162578 100644 --- a/mindspore-lite/src/extendrt/delegate/factory.cc +++ b/mindspore-lite/src/extendrt/delegate/factory.cc @@ -16,7 +16,7 @@ #include "src/extendrt/delegate/factory.h" -namespace mindspore { +namespace mindspore::lite { template DelegateRegistry &DelegateRegistry::GetInstance() { @@ -64,4 +64,4 @@ T DelegateRegistry::GetDelegate(const mindspore::DeviceType &device_type, con template class DelegateRegistry>; -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/factory.h b/mindspore-lite/src/extendrt/delegate/factory.h index 03bdf92a..6115bb73 100644 --- a/mindspore-lite/src/extendrt/delegate/factory.h +++ b/mindspore-lite/src/extendrt/delegate/factory.h @@ -26,7 +26,7 @@ #include "include/api/context.h" #include "src/common/config_infos.h" -namespace mindspore { +namespace mindspore::lite { template using DelegateCreator = std::function &, const ConfigInfos &)>; @@ -63,6 +63,6 @@ class DelegateRegistrar { return creator(context, config_infos); \ }; \ static DelegateRegistrar g_##device_type##provider##Delegate(device_type, provider, &func); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_FACTORY_H_ diff --git a/mindspore-lite/src/extendrt/delegate/graph_executor/litert/func_graph_reuse_manager.cc b/mindspore-lite/src/extendrt/delegate/graph_executor/litert/func_graph_reuse_manager.cc index 4c85f752..10ef2182 100644 --- a/mindspore-lite/src/extendrt/delegate/graph_executor/litert/func_graph_reuse_manager.cc +++ b/mindspore-lite/src/extendrt/delegate/graph_executor/litert/func_graph_reuse_manager.cc @@ -17,7 +17,7 @@ #include "extendrt/delegate/graph_executor/litert/func_graph_reuse_manager.h" #include #include "src/common/common.h" -namespace mindspore { +namespace mindspore::lite { std::mutex mtx_manager_; FuncGraphReuseManager *FuncGraphReuseManager::GetInstance() { @@ -201,4 +201,4 @@ FuncGraphReuseManager::~FuncGraphReuseManager() { all_fb_model_buf_.clear(); MS_LOG(INFO) << "~FuncGraphReuseManager() end."; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/graph_executor/litert/func_graph_reuse_manager.h b/mindspore-lite/src/extendrt/delegate/graph_executor/litert/func_graph_reuse_manager.h index 78f52e48..159db335 100644 --- a/mindspore-lite/src/extendrt/delegate/graph_executor/litert/func_graph_reuse_manager.h +++ 
b/mindspore-lite/src/extendrt/delegate/graph_executor/litert/func_graph_reuse_manager.h @@ -26,7 +26,7 @@ #include "include/api/status.h" #include "src/common/helper/infer_helpers.h" #include "src/extendrt/session/lite_graph_executor.h" -namespace mindspore { +namespace mindspore::lite { using MSTensorPtr = std::shared_ptr; struct ModelBufPair { @@ -70,5 +70,5 @@ class FuncGraphReuseManager { std::unordered_map> all_in_names_; std::unordered_map> all_out_names_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_GRAPH_EXECUTOR_LITERT_FUNC_GRAPH_REUSE_MANAGER_H_ diff --git a/mindspore-lite/src/extendrt/delegate/graph_executor/litert/graph_executor.cc b/mindspore-lite/src/extendrt/delegate/graph_executor/litert/graph_executor.cc index 2981e459..c7246f8f 100644 --- a/mindspore-lite/src/extendrt/delegate/graph_executor/litert/graph_executor.cc +++ b/mindspore-lite/src/extendrt/delegate/graph_executor/litert/graph_executor.cc @@ -35,7 +35,7 @@ #include "src/executor/kernel_exec.h" #include "src/extendrt/delegate/graph_executor/litert/func_graph_reuse_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace { // leave 200MB for the model struct to make sure the model will not large than 2GB const size_t kOnlineExtractDataSize = 1800 * 1024 * 1024; @@ -459,4 +459,4 @@ static std::shared_ptr LiteRTGraphExecutorCreator(const std:: } REG_DELEGATE(kCPU, litert_provider, LiteRTGraphExecutorCreator); -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate/graph_executor/litert/graph_executor.h b/mindspore-lite/src/extendrt/delegate/graph_executor/litert/graph_executor.h index 9c54929c..0c113c5f 100644 --- a/mindspore-lite/src/extendrt/delegate/graph_executor/litert/graph_executor.h +++ b/mindspore-lite/src/extendrt/delegate/graph_executor/litert/graph_executor.h @@ -30,7 +30,7 @@ #include "src/common/helper/infer_helpers.h" #include "src/common/config_infos.h" -namespace mindspore { +namespace mindspore::lite { class LiteRTGraphExecutor : public LiteGraphExecutor { public: LiteRTGraphExecutor() = default; @@ -74,5 +74,5 @@ class LiteRTGraphExecutor : public LiteGraphExecutor { bool is_shared_fb_buf_ = false; void *fb_model_buf_ = nullptr; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_GRAPH_EXECUTOR_LITERT_GRAPH_EXECUTOR_H_ diff --git a/mindspore-lite/src/extendrt/delegate/graph_executor/litert/litert_plugin_impl.h b/mindspore-lite/src/extendrt/delegate/graph_executor/litert/litert_plugin_impl.h index 57008cc2..a91720e2 100644 --- a/mindspore-lite/src/extendrt/delegate/graph_executor/litert/litert_plugin_impl.h +++ b/mindspore-lite/src/extendrt/delegate/graph_executor/litert/litert_plugin_impl.h @@ -19,13 +19,13 @@ #include "src/common/log_adapter.h" #include "extendrt/delegate/plugin/litert_executor_plugin.h" -namespace mindspore::infer { +namespace mindspore::lite::infer { class LiteRTPluginImpl : public LiteRTExecutorPluginImplBase { public: LiteRTPluginImpl() = default; ~LiteRTPluginImpl() = default; }; -} // namespace mindspore::infer +} // namespace mindspore::lite::infer extern "C" MS_API mindspore::infer::LiteRTExecutorPluginImplBase *CreateLiteRTPluginImpl(); #endif // MINDSPORE_LITE_SRC_EXTENDRT_DELEGATE_GRAPH_EXECUTOR_LITERT_GRAPH_EXECUTO_LITERT_PLUGIN_IMPL_H_ diff --git a/mindspore-lite/src/extendrt/delegate/plugin/litert_executor_plugin.cc 
b/mindspore-lite/src/extendrt/delegate/plugin/litert_executor_plugin.cc index 59861979..c24208ed 100644 --- a/mindspore-lite/src/extendrt/delegate/plugin/litert_executor_plugin.cc +++ b/mindspore-lite/src/extendrt/delegate/plugin/litert_executor_plugin.cc @@ -22,7 +22,7 @@ #include "extendrt/cxx_api/dlutils.h" #endif -namespace mindspore::infer { +namespace mindspore::lite::infer { namespace { constexpr auto kLiteRtPluginSoName = "libmsplugin-ge-litert.so"; constexpr auto kFunCreateLiteRTPluginImp = "CreateLiteRTPluginImpl"; @@ -75,4 +75,4 @@ bool LiteRTExecutorPlugin::Register() { #endif return true; } -} // namespace mindspore::infer +} // namespace mindspore::lite::infer diff --git a/mindspore-lite/src/extendrt/delegate/plugin/litert_executor_plugin.h b/mindspore-lite/src/extendrt/delegate/plugin/litert_executor_plugin.h index 1875964f..0325570a 100644 --- a/mindspore-lite/src/extendrt/delegate/plugin/litert_executor_plugin.h +++ b/mindspore-lite/src/extendrt/delegate/plugin/litert_executor_plugin.h @@ -19,7 +19,7 @@ #include "src/common/log_adapter.h" #include "mindapi/base/macros.h" -namespace mindspore::infer { +namespace mindspore::lite::infer { class MS_API LiteRTExecutorPlugin { public: static LiteRTExecutorPlugin &GetInstance(); @@ -38,5 +38,5 @@ class LiteRTExecutorPluginImplBase { LiteRTExecutorPluginImplBase() = default; virtual ~LiteRTExecutorPluginImplBase() = default; }; -} // namespace mindspore::infer +} // namespace mindspore::lite::infer #endif // MINDSPORE_LITE_SRC_EXTENDRT_LITERT_EXECUTOR_PLUGIN_H_ diff --git a/mindspore-lite/src/extendrt/delegate_graph_executor.cc b/mindspore-lite/src/extendrt/delegate_graph_executor.cc index 81a4d2e5..8d0116d1 100644 --- a/mindspore-lite/src/extendrt/delegate_graph_executor.cc +++ b/mindspore-lite/src/extendrt/delegate_graph_executor.cc @@ -18,7 +18,7 @@ #include #include "src/extendrt/subgraph_kernel.h" #include "infer/cxx_api/partial_fusion.h" -namespace mindspore { +namespace mindspore::lite { // Graph sink delegate, the whole FuncGraph as a node to execute. void GraphSinkDelegate::ReplaceNodes(const std::shared_ptr &graph) { sink_graph_ = graph; @@ -47,4 +47,4 @@ std::shared_ptr GraphExecutorDelegate::CreateKernel(cons auto kernel = std::make_shared(sink_graph_, executor_); return kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/delegate_graph_executor.h b/mindspore-lite/src/extendrt/delegate_graph_executor.h index b38b233f..4329fb4a 100644 --- a/mindspore-lite/src/extendrt/delegate_graph_executor.h +++ b/mindspore-lite/src/extendrt/delegate_graph_executor.h @@ -27,7 +27,7 @@ #include "src/extendrt/session/lite_graph_executor.h" #include "src/extendrt/subgraph_kernel.h" -namespace mindspore { +namespace mindspore::lite { // Graph sink delegate, the whole FuncGraph as a node to execute. 
class GraphSinkDelegate { public: @@ -54,5 +54,5 @@ class GraphExecutorDelegate : public GraphSinkDelegate { private: const std::shared_ptr executor_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/src/extendrt/dynamic_mem_allocator.cc b/mindspore-lite/src/extendrt/dynamic_mem_allocator.cc index 0363f2fa..3d4ac01c 100644 --- a/mindspore-lite/src/extendrt/dynamic_mem_allocator.cc +++ b/mindspore-lite/src/extendrt/dynamic_mem_allocator.cc @@ -17,7 +17,7 @@ #include "src/common/utils.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { void *DynamicMemAllocator::Malloc(size_t size) { if (mem_oper_ != nullptr) { return mem_oper_->Malloc(size); @@ -67,4 +67,4 @@ DynamicMemAllocator::DynamicMemAllocator(int node_id) { } mem_oper_ = mem_manager_->GetMemOperator(node_id); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/dynamic_mem_allocator.h b/mindspore-lite/src/extendrt/dynamic_mem_allocator.h index e6e80672..9772eba1 100644 --- a/mindspore-lite/src/extendrt/dynamic_mem_allocator.h +++ b/mindspore-lite/src/extendrt/dynamic_mem_allocator.h @@ -24,7 +24,7 @@ #include "include/api/allocator.h" #include "src/extendrt/dynamic_mem_manager.h" -namespace mindspore { +namespace mindspore::lite { class DynamicMemAllocator : public Allocator { public: explicit DynamicMemAllocator(int node_id); @@ -41,6 +41,6 @@ class DynamicMemAllocator : public Allocator { std::shared_ptr mem_manager_ = nullptr; std::shared_ptr mem_oper_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DYNAMIC_MEM_ALLOCATOR_H_ diff --git a/mindspore-lite/src/extendrt/dynamic_mem_manager.cc b/mindspore-lite/src/extendrt/dynamic_mem_manager.cc index 2b34ec2a..6fa36d84 100644 --- a/mindspore-lite/src/extendrt/dynamic_mem_manager.cc +++ b/mindspore-lite/src/extendrt/dynamic_mem_manager.cc @@ -22,7 +22,7 @@ using mindspore::numa::NUMAAdapter; using mindspore::numa::MemoryInfo; -namespace mindspore { +namespace mindspore::lite { namespace { // Alloc memory aligned according to 64 bytes. 
static constexpr size_t kMemAlginSize = 64; @@ -328,4 +328,4 @@ std::shared_ptr DynamicMemManager::GetMemOperator(const int node_id } return mem_oper; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/dynamic_mem_manager.h b/mindspore-lite/src/extendrt/dynamic_mem_manager.h index 8fd72d11..955755ea 100644 --- a/mindspore-lite/src/extendrt/dynamic_mem_manager.h +++ b/mindspore-lite/src/extendrt/dynamic_mem_manager.h @@ -25,7 +25,7 @@ #include #include "src/extendrt/numa_adapter.h" -namespace mindspore { +namespace mindspore::lite { struct Block { // used_ may be true when ref_count_ == 0 bool used_ = false; @@ -83,6 +83,6 @@ class DynamicMemManager { std::map> nodes_mem_; std::mutex mutex_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_DYNAMIC_MEM_MANAGER_H_ diff --git a/mindspore-lite/src/extendrt/execution_flow.cc b/mindspore-lite/src/extendrt/execution_flow.cc index 4277bd5f..859e1359 100644 --- a/mindspore-lite/src/extendrt/execution_flow.cc +++ b/mindspore-lite/src/extendrt/execution_flow.cc @@ -19,7 +19,7 @@ #include "src/litert/kernel_exec_util.h" #include "src/executor/sub_graph_kernel.h" -namespace mindspore::infer { +namespace mindspore::lite::infer { ExecutionFlow::~ExecutionFlow() { for (auto kernel : kernels_) { delete kernel; @@ -110,4 +110,4 @@ mindspore::kernel::SubGraphType ExecutionFlow::GetSubGraphType(abstract::Kernel return kernel::kNotSubGraph; } } -} // namespace mindspore::infer +} // namespace mindspore::lite::infer diff --git a/mindspore-lite/src/extendrt/execution_flow.h b/mindspore-lite/src/extendrt/execution_flow.h index 3ea180b7..899ce293 100644 --- a/mindspore-lite/src/extendrt/execution_flow.h +++ b/mindspore-lite/src/extendrt/execution_flow.h @@ -24,7 +24,7 @@ #include "infer/execution_flow.h" #include "src/executor/sub_graph_kernel.h" -namespace mindspore::infer { +namespace mindspore::lite::infer { class ExecutionFlow : public abstract::ExecutionFlow { public: ExecutionFlow() = default; @@ -74,6 +74,6 @@ class ExecutionFlow : public abstract::ExecutionFlow { abstract::KernelCallBack after_; }; using ExecutionFlowPtr = std::shared_ptr; -} // namespace mindspore::infer +} // namespace mindspore::lite::infer #endif // MINDSPORE_LITE_SRC_EXTENDRT_EXECUTION_FLOW_H_ diff --git a/mindspore-lite/src/extendrt/execution_plan.cc b/mindspore-lite/src/extendrt/execution_plan.cc index 936bbbf4..17c5edf2 100644 --- a/mindspore-lite/src/extendrt/execution_plan.cc +++ b/mindspore-lite/src/extendrt/execution_plan.cc @@ -22,7 +22,7 @@ #include "litert/kernel_exec_util.h" #include "executor/sub_graph_kernel.h" -namespace mindspore::infer { +namespace mindspore::lite::infer { ExecutionPlan::~ExecutionPlan() { delete input_isolate_map_; delete output_isolate_map_; @@ -89,4 +89,4 @@ bool ExecutionPlan::CalcTensorRefCount(abstract::Kernel *subgraph_kernel) { } return true; } -} // namespace mindspore::infer +} // namespace mindspore::lite::infer diff --git a/mindspore-lite/src/extendrt/execution_plan.h b/mindspore-lite/src/extendrt/execution_plan.h index 46edbb24..804a82bf 100644 --- a/mindspore-lite/src/extendrt/execution_plan.h +++ b/mindspore-lite/src/extendrt/execution_plan.h @@ -25,7 +25,7 @@ #include "infer/execution_plan.h" #include "src/executor/sub_graph_kernel.h" -namespace mindspore::infer { +namespace mindspore::lite::infer { /** * ExecutionPlan: Execution plan for cloud infer */ @@ -105,6 +105,6 @@ class ExecutionPlan : public abstract::ExecutionPlan { 
std::unordered_map *input_isolate_map_ = nullptr; std::unordered_map *output_isolate_map_ = nullptr; }; -} // namespace mindspore::infer +} // namespace mindspore::lite::infer #endif // MINDSPORE_LITE_SRC_EXTENDRT_EXECUTION_PLAN_H_ diff --git a/mindspore-lite/src/extendrt/factory.h b/mindspore-lite/src/extendrt/factory.h index 5466df10..1be52a65 100644 --- a/mindspore-lite/src/extendrt/factory.h +++ b/mindspore-lite/src/extendrt/factory.h @@ -23,7 +23,7 @@ #include #include "include/common/utils/utils.h" -namespace mindspore { +namespace mindspore::lite { inline enum DeviceType g_device_target = kInvalidDeviceType; static inline LogStream &operator<<(LogStream &stream, DeviceType device_type) { @@ -87,5 +87,5 @@ class Registrar { #define API_FACTORY_CREATOR(DERIVE_CLASS) []() { return std::make_shared(); } #define API_FACTORY_REG(BASE, DERIVE) static const Registrar g_api_##DERIVE##_reg(API_FACTORY_CREATOR(DERIVE)); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_EXTENDRT_FACTORY_H diff --git a/mindspore-lite/src/extendrt/graph_scheduler.cc b/mindspore-lite/src/extendrt/graph_scheduler.cc index e9e89bd3..02327965 100644 --- a/mindspore-lite/src/extendrt/graph_scheduler.cc +++ b/mindspore-lite/src/extendrt/graph_scheduler.cc @@ -16,8 +16,8 @@ #include "extendrt/graph_scheduler.h" #include "extendrt/graph_compiler.h" -namespace mindspore { +namespace mindspore::lite { namespace infer { ExcutionPlan GraphScheduler::Schedule(const CompileResult &compile_result) { return {}; } } // namespace infer -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/graph_scheduler.h b/mindspore-lite/src/extendrt/graph_scheduler.h index 4870615a..0bd52613 100644 --- a/mindspore-lite/src/extendrt/graph_scheduler.h +++ b/mindspore-lite/src/extendrt/graph_scheduler.h @@ -27,7 +27,7 @@ #include "ir/func_graph.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { namespace infer { struct ScheduleStrategy {}; struct GraphCompilerInfo; @@ -47,5 +47,5 @@ class GraphScheduler : public std::enable_shared_from_this { ExcutionPlan Schedule(const CompileResult &); }; } // namespace infer -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/src/extendrt/infer_session.cc b/mindspore-lite/src/extendrt/infer_session.cc index d174f8ee..ca904832 100644 --- a/mindspore-lite/src/extendrt/infer_session.cc +++ b/mindspore-lite/src/extendrt/infer_session.cc @@ -22,7 +22,7 @@ #include "extendrt/delegate/plugin/ascend_acl_executor_plugin.h" #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { namespace { void AscendPluginRegistration(const std::shared_ptr &ascend_device, bool use_experimental_rts) { constexpr auto default_npu_provider = "ge"; @@ -121,4 +121,4 @@ Status InferSession::Finalize() { MS_LOG(INFO) << "Finalize is only implemented in delegate_session now."; return kLiteError; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/infer_session.h b/mindspore-lite/src/extendrt/infer_session.h index b2e36b7c..90c4b037 100644 --- a/mindspore-lite/src/extendrt/infer_session.h +++ b/mindspore-lite/src/extendrt/infer_session.h @@ -29,7 +29,7 @@ #include "common/mutable_tensor_impl.h" #include "src/common/config_infos.h" -namespace mindspore { +namespace mindspore::lite { class InferSession : public std::enable_shared_from_this { public: virtual ~InferSession() = default; @@ -158,5 +158,5 @@ class 
InferSession : public std::enable_shared_from_this { static SessionType SelectSession(const std::shared_ptr &context, bool use_experimental_rts = false); uint64_t sharable_handle_ = 0; }; // namespace mindspore -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/src/extendrt/mindir_loader/abstract_base_model.h b/mindspore-lite/src/extendrt/mindir_loader/abstract_base_model.h index 2c30f0b0..80076d88 100644 --- a/mindspore-lite/src/extendrt/mindir_loader/abstract_base_model.h +++ b/mindspore-lite/src/extendrt/mindir_loader/abstract_base_model.h @@ -27,7 +27,7 @@ using Model = mindspore::lite::Model; using LiteGraph = mindspore::lite::LiteGraph; -namespace mindspore::infer { +namespace mindspore::lite::infer { class AbstractBaseModel : public Model { public: virtual bool ModelVerify() const = 0; @@ -38,6 +38,6 @@ class AbstractBaseModel : public Model { const LiteGraph::Node *node, lite::InnerContext *context, TypeId prefer_data_type) = 0; }; -} // namespace mindspore::infer +} // namespace mindspore::lite::infer #endif // MINDSPORE_LITE_SRC_EXTENDRT_MINDIR_LOADER_ABSTRACT_BASE_MODEL_H_ diff --git a/mindspore-lite/src/extendrt/mindir_loader/abstract_kernel.h b/mindspore-lite/src/extendrt/mindir_loader/abstract_kernel.h index 4421ed11..655c3666 100644 --- a/mindspore-lite/src/extendrt/mindir_loader/abstract_kernel.h +++ b/mindspore-lite/src/extendrt/mindir_loader/abstract_kernel.h @@ -24,7 +24,7 @@ using mindspore::kernel::Kernel; -namespace mindspore::infer { +namespace mindspore::lite::infer { class Abstractkernel : public Kernel { public: virtual int Train() = 0; @@ -51,6 +51,6 @@ class Abstractkernel : public Kernel { virtual const std::vector &out_tensors() const = 0; }; -} // namespace mindspore::infer +} // namespace mindspore::lite::infer #endif // MINDSPORE_LITE_SRC_EXTENDRT_MINDIR_LOADER_ABSTRACT_KERNEL_H_ diff --git a/mindspore-lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.cc b/mindspore-lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.cc index b6a74d64..2251d023 100644 --- a/mindspore-lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.cc +++ b/mindspore-lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.cc @@ -20,7 +20,7 @@ #include "extendrt/mindir_loader/mindir_model/inner_kernel.h" #include "abstract/abstract_value.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int InnerKernel::Prepare() { auto inputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(this->in_tensors_); auto outputs = CloudTensorUtils::LiteTensorToKernelTensorPtrVec(this->out_tensors_); @@ -45,4 +45,4 @@ int InnerKernel::ReSize() { return this->kernel_mod_->Init(inputs, outputs) ? 
mindspore::lite::RET_OK : mindspore::lite::RET_ERROR; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.h b/mindspore-lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.h index 488328e1..be8dba08 100644 --- a/mindspore-lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.h +++ b/mindspore-lite/src/extendrt/mindir_loader/mindir_model/inner_kernel.h @@ -31,7 +31,7 @@ using mindspore::infer::Abstractkernel; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class InnerKernel : public Abstractkernel { public: InnerKernel() = default; @@ -98,6 +98,6 @@ class InnerKernel : public Abstractkernel { std::vector out_tensors_; const mindspore::lite::InnerContext *ms_context_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_EXTENDRT_MINDIR_LOADER_MINDIR_MODEL_INNER_KERNEL_H_ diff --git a/mindspore-lite/src/extendrt/mindir_loader/mindir_model/kernel_mod_mock.cc b/mindspore-lite/src/extendrt/mindir_loader/mindir_model/kernel_mod_mock.cc index 9d0d5bbf..450f4d1e 100644 --- a/mindspore-lite/src/extendrt/mindir_loader/mindir_model/kernel_mod_mock.cc +++ b/mindspore-lite/src/extendrt/mindir_loader/mindir_model/kernel_mod_mock.cc @@ -19,9 +19,9 @@ #include "runtime/hardware_abstract/kernel_base/kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { KernelErrorCode KernelMod::Resize(const std::vector &inputs, const std::vector &outputs) { return KRET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/extendrt/mindir_loader/mindir_model/kernel_mod_util.cc b/mindspore-lite/src/extendrt/mindir_loader/mindir_model/kernel_mod_util.cc index 2c1699d2..ea3b2edb 100644 --- a/mindspore-lite/src/extendrt/mindir_loader/mindir_model/kernel_mod_util.cc +++ b/mindspore-lite/src/extendrt/mindir_loader/mindir_model/kernel_mod_util.cc @@ -23,7 +23,7 @@ #include "runtime/hardware_abstract/kernel_base/ms_factory.h" #include "mindspore/ops/kernel/cpu/cpu_kernel_mod.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { std::shared_ptr KernelModUtil::GetInnerKernel( const std::vector &in_tensors, const std::vector &out_tensors, const mindspore::lite::LiteGraph::Node *node, lite::InnerContext *context) { @@ -38,4 +38,4 @@ std::shared_ptr KernelModUtil::GetInnerKernel( auto base_operator = std::reinterpret_pointer_cast(node->base_operator_); return std::make_shared(kernel_mod, base_operator, in_tensors, out_tensors, context); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/extendrt/mindir_loader/mindir_model/kernel_mod_util.h b/mindspore-lite/src/extendrt/mindir_loader/mindir_model/kernel_mod_util.h index bdd687f7..b898dfef 100644 --- a/mindspore-lite/src/extendrt/mindir_loader/mindir_model/kernel_mod_util.h +++ b/mindspore-lite/src/extendrt/mindir_loader/mindir_model/kernel_mod_util.h @@ -24,13 +24,13 @@ #include "src/tensor.h" #include "include/model.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class KernelModUtil { public: static std::shared_ptr GetInnerKernel( const std::vector &in_tensors, const std::vector &out_tensors, const mindspore::lite::LiteGraph::Node *node, lite::InnerContext *context); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // 
MINDSPORE_LITE_SRC_EXTENDRT_MINDIR_LOADER_MINDIR_MODEL_KERNEL_MOD_UTIL_H_ diff --git a/mindspore-lite/src/extendrt/mindir_loader/model_loader.cc b/mindspore-lite/src/extendrt/mindir_loader/model_loader.cc index 0c68e11d..bc785aba 100644 --- a/mindspore-lite/src/extendrt/mindir_loader/model_loader.cc +++ b/mindspore-lite/src/extendrt/mindir_loader/model_loader.cc @@ -16,7 +16,7 @@ #include "extendrt/mindir_loader/model_loader.h" -namespace mindspore::infer { +namespace mindspore::lite::infer { constexpr size_t kMaxModelBufferSize = static_cast(1024) * 1024 * 1024 * 2; int ModelLoader::InitModelBuffer(AbstractBaseModel *model, const char *model_buf, size_t size, bool take_buf) { @@ -51,4 +51,4 @@ ModelLoaderRegistry *ModelLoaderRegistry::GetInstance() { static ModelLoaderRegistry instance; return &instance; } -} // namespace mindspore::infer +} // namespace mindspore::lite::infer diff --git a/mindspore-lite/src/extendrt/mindir_loader/model_loader.h b/mindspore-lite/src/extendrt/mindir_loader/model_loader.h index c21fb954..a33bd2b4 100644 --- a/mindspore-lite/src/extendrt/mindir_loader/model_loader.h +++ b/mindspore-lite/src/extendrt/mindir_loader/model_loader.h @@ -21,7 +21,7 @@ #include "extendrt/mindir_loader/abstract_base_model.h" -namespace mindspore::infer { +namespace mindspore::lite::infer { class ModelLoader { public: virtual AbstractBaseModel *ImportModel(const char *model_buf, size_t size, bool take_buf) = 0; @@ -63,6 +63,6 @@ class ModelLoaderRegistrar { #define REG_MODEL_LOADER(model_type, model_loader_creator) \ static ModelLoaderRegistrar g_##model_type##model_loader##ModelLoader(model_type, model_loader_creator); -} // namespace mindspore::infer +} // namespace mindspore::lite::infer #endif // MINDSPORE_LITE_SRC_EXTENDRT_MINDIR_LOADER_MODEL_LOADER_H_ diff --git a/mindspore-lite/src/extendrt/mock/lite_runtime/converters.cc b/mindspore-lite/src/extendrt/mock/lite_runtime/converters.cc index b12f1536..1dc9bd52 100644 --- a/mindspore-lite/src/extendrt/mock/lite_runtime/converters.cc +++ b/mindspore-lite/src/extendrt/mock/lite_runtime/converters.cc @@ -18,7 +18,7 @@ #include "src/common/log_adapter.h" #include "src/common/utils.h" -namespace mindspore { +namespace mindspore::lite { constexpr static int kMaxNumOfDevices = 3; constexpr static int kDefaultThreadNumTwo = 2; constexpr static int kDefaultThreadNumFour = 4; @@ -135,4 +135,4 @@ std::shared_ptr ContextUtils::Convert(Context *context) { } return inner_context; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/mock/lite_runtime/converters.h b/mindspore-lite/src/extendrt/mock/lite_runtime/converters.h index e8943835..a5ff098a 100644 --- a/mindspore-lite/src/extendrt/mock/lite_runtime/converters.h +++ b/mindspore-lite/src/extendrt/mock/lite_runtime/converters.h @@ -26,7 +26,7 @@ #include "src/litert/inner_context.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { class ContextUtils { public: static std::shared_ptr Convert(Context *context); @@ -63,6 +63,6 @@ inline lite::QuantizationType A2L_ConvertQT(mindspore::QuantizationType qt) { } Status A2L_ConvertConfig(const TrainCfg *a_train_cfg, lite::TrainCfg *l_train_cfg); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_CXX_API_CONVERTERS_H_ diff --git a/mindspore-lite/src/extendrt/mock/lite_runtime/populate/arithmetic_populate.cc b/mindspore-lite/src/extendrt/mock/lite_runtime/populate/arithmetic_populate.cc index da617451..607bac80 100644 --- 
a/mindspore-lite/src/extendrt/mock/lite_runtime/populate/arithmetic_populate.cc +++ b/mindspore-lite/src/extendrt/mock/lite_runtime/populate/arithmetic_populate.cc @@ -37,7 +37,7 @@ using mindspore::schema::PrimitiveType_NotEqual; using mindspore::schema::PrimitiveType_RealDiv; using mindspore::schema::PrimitiveType_SquaredDifference; -namespace mindspore { +namespace mindspore::lite { ArithmeticParameter *PopulateArithmeticCommonPara(void *base_operator) { MS_CHECK_TRUE_RET(base_operator != nullptr, nullptr); auto base_operator_ptr = static_cast(base_operator); @@ -89,4 +89,4 @@ REG_BASE_POPULATE(PrimitiveType_FloorMod, PopulateArithmetic) REG_BASE_POPULATE(PrimitiveType_Mod, PopulateArithmetic) REG_BASE_POPULATE(PrimitiveType_SquaredDifference, PopulateArithmetic) REG_BASE_POPULATE(PrimitiveType_BiasAddGrad, PopulateArithmetic) -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/mock/lite_runtime/populate/arithmetic_populate.h b/mindspore-lite/src/extendrt/mock/lite_runtime/populate/arithmetic_populate.h index 404acb0e..d335ba8b 100644 --- a/mindspore-lite/src/extendrt/mock/lite_runtime/populate/arithmetic_populate.h +++ b/mindspore-lite/src/extendrt/mock/lite_runtime/populate/arithmetic_populate.h @@ -18,8 +18,8 @@ #include "nnacl_c/arithmetic_parameter.h" -namespace mindspore { +namespace mindspore::lite { ArithmeticParameter *PopulateArithmeticCommonPara(void *prim); OpParameter *PopulateArithmetic(void *primitive); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_COMMON_OPS_POPULATE_ARITHMETIC_POPULATE_H_ diff --git a/mindspore-lite/src/extendrt/mock/lite_runtime/populate/base_operator_populate_register.cc b/mindspore-lite/src/extendrt/mock/lite_runtime/populate/base_operator_populate_register.cc index 97aafc7d..d170e7b7 100644 --- a/mindspore-lite/src/extendrt/mock/lite_runtime/populate/base_operator_populate_register.cc +++ b/mindspore-lite/src/extendrt/mock/lite_runtime/populate/base_operator_populate_register.cc @@ -16,9 +16,9 @@ #include "extendrt/mock/lite_runtime/populate/base_operator_populate_register.h" -namespace mindspore { +namespace mindspore::lite { BaseOperatorPopulateRegistry *BaseOperatorPopulateRegistry::GetInstance() { static BaseOperatorPopulateRegistry registry; return ®istry; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/mock/lite_runtime/populate/base_operator_populate_register.h b/mindspore-lite/src/extendrt/mock/lite_runtime/populate/base_operator_populate_register.h index 1a6e1251..47693b77 100644 --- a/mindspore-lite/src/extendrt/mock/lite_runtime/populate/base_operator_populate_register.h +++ b/mindspore-lite/src/extendrt/mock/lite_runtime/populate/base_operator_populate_register.h @@ -30,7 +30,7 @@ #include "src/common/utils.h" #include "src/common/log_util.h" -namespace mindspore { +namespace mindspore::lite { constexpr int kOffsetTwo = 2; constexpr int kOffsetThree = 3; constexpr size_t kMinShapeSizeTwo = 2; @@ -95,5 +95,5 @@ class BaseRegistry { #define REG_BASE_POPULATE(primitive_type, creator) \ static BaseRegistry g_##primitive_type##base_populate(primitive_type, creator); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_COMMON_OPS_POPULATE_POPULATE_REGISTER_H_ diff --git a/mindspore-lite/src/extendrt/model_manager.cc b/mindspore-lite/src/extendrt/model_manager.cc index 4125df26..e9ec357d 100644 --- a/mindspore-lite/src/extendrt/model_manager.cc +++ 
b/mindspore-lite/src/extendrt/model_manager.cc @@ -21,7 +21,7 @@ #include #include "include/api/model_group.h" -namespace mindspore { +namespace mindspore::lite { bool JudgeMergeFlag(ModelGroupFlag input_flag, ModelGroupFlag cur_flag) { return (input_flag == ModelGroupFlag::kShareWeight && cur_flag == ModelGroupFlag::kShareWorkspace) || (cur_flag == ModelGroupFlag::kShareWeight && input_flag == ModelGroupFlag::kShareWorkspace); @@ -49,4 +49,4 @@ ModelManager::~ModelManager() { model_path_set_.clear(); model_buff_set_.clear(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/model_manager.h b/mindspore-lite/src/extendrt/model_manager.h index 6ddd361d..c75a90bb 100644 --- a/mindspore-lite/src/extendrt/model_manager.h +++ b/mindspore-lite/src/extendrt/model_manager.h @@ -25,7 +25,7 @@ #include #include "include/api/model_group.h" -namespace mindspore { +namespace mindspore::lite { class ModelManager { public: ModelManager() {} @@ -49,6 +49,6 @@ class ModelManager { std::map model_path_set_; std::set> model_buff_set_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_MODEL_MANAGER_H_ diff --git a/mindspore-lite/src/extendrt/numa_adapter.cc b/mindspore-lite/src/extendrt/numa_adapter.cc index 459da65b..16ebd7dc 100644 --- a/mindspore-lite/src/extendrt/numa_adapter.cc +++ b/mindspore-lite/src/extendrt/numa_adapter.cc @@ -22,7 +22,7 @@ #include "src/common/common.h" #include "src/common/utils.h" -namespace mindspore { +namespace mindspore::lite { namespace numa { namespace { static constexpr auto kNodeBase = "/sys/devices/system/node/node"; @@ -222,4 +222,4 @@ NUMAAdapter::~NUMAAdapter() { MS_LOG(DEBUG) << "~NUMAAdapter() end."; } } // namespace numa -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/numa_adapter.h b/mindspore-lite/src/extendrt/numa_adapter.h index 8d42ece7..cbca8434 100644 --- a/mindspore-lite/src/extendrt/numa_adapter.h +++ b/mindspore-lite/src/extendrt/numa_adapter.h @@ -22,7 +22,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { namespace numa { struct bitmask { uint64_t size; @@ -73,5 +73,5 @@ class NUMAAdapter { std::unordered_map> node_cpu_list_; }; } // namespace numa -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_NUMA_ADAPTER_H_ diff --git a/mindspore-lite/src/extendrt/session/delegate_session.cc b/mindspore-lite/src/extendrt/session/delegate_session.cc index 579efa3a..aba538cf 100644 --- a/mindspore-lite/src/extendrt/session/delegate_session.cc +++ b/mindspore-lite/src/extendrt/session/delegate_session.cc @@ -29,7 +29,7 @@ #include "common/common.h" #include "src/extendrt/session/lite_graph_executor.h" #include "extendrt/delegate/plugin/ascend_acl_executor_plugin.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr auto kIsAdapted = "is_adapted"; constexpr size_t kSupportedWeightNum = 1; @@ -444,4 +444,4 @@ static std::shared_ptr DelegateSessionCreator(const std::shared_pt return session; } REG_SESSION(kDelegateSession, DelegateSessionCreator); -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/session/delegate_session.h b/mindspore-lite/src/extendrt/session/delegate_session.h index e5658d80..4e0c51d5 100644 --- a/mindspore-lite/src/extendrt/session/delegate_session.h +++ b/mindspore-lite/src/extendrt/session/delegate_session.h @@ -25,7 +25,7 @@ #include 
"runtime/hardware_abstract/device_context/device_context.h" #include "extendrt/session/lite_graph_executor.h" #include "extendrt/delegate/ascend_acl/ascend_allocator_plugin.h" -namespace mindspore { +namespace mindspore::lite { /// \brief Delegate Session implementation, use delegate api for inference. struct DelegateGraphInfo { std::vector inputs; @@ -79,6 +79,6 @@ class GraphSinkSession : public InferSession { std::shared_ptr context_; ConfigInfos config_infos_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_EXTENDRT_SESSION_DELEGATE_SESSION_H_ diff --git a/mindspore-lite/src/extendrt/session/factory.cc b/mindspore-lite/src/extendrt/session/factory.cc index e0ce297d..6ac98783 100644 --- a/mindspore-lite/src/extendrt/session/factory.cc +++ b/mindspore-lite/src/extendrt/session/factory.cc @@ -20,7 +20,7 @@ #include "extendrt/session/type.h" #include "extendrt/infer_session.h" -namespace mindspore { +namespace mindspore::lite { SessionRegistry &SessionRegistry::GetInstance() { static SessionRegistry instance; return instance; @@ -39,4 +39,4 @@ std::shared_ptr SessionRegistry::GetSession(const mindspore::Sessi } return it->second(ctx, config_info); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/session/factory.h b/mindspore-lite/src/extendrt/session/factory.h index d0809e79..546cbb29 100644 --- a/mindspore-lite/src/extendrt/session/factory.h +++ b/mindspore-lite/src/extendrt/session/factory.h @@ -22,7 +22,7 @@ #include "extendrt/session/type.h" #include "extendrt/infer_session.h" #include "include/api/context.h" -namespace mindspore { +namespace mindspore::lite { using InferSessionRegFunc = std::function(const std::shared_ptr &, const ConfigInfos &)>; @@ -51,6 +51,6 @@ class SessionRegistrar { }; #define REG_SESSION(session_type, creator) static SessionRegistrar g_##session_type##Session(session_type, creator); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_EXTENDRT_SESSION_FACTORY_H_ diff --git a/mindspore-lite/src/extendrt/session/lite_graph_executor.h b/mindspore-lite/src/extendrt/session/lite_graph_executor.h index 99610698..da167a75 100644 --- a/mindspore-lite/src/extendrt/session/lite_graph_executor.h +++ b/mindspore-lite/src/extendrt/session/lite_graph_executor.h @@ -25,7 +25,7 @@ #include "runtime/hardware_abstract/device_context/device_context.h" #include "include/api/status.h" -namespace mindspore { +namespace mindspore::lite { /// \brief Adaptive Graph Executor for cloud Graph Executor to solve interface conflicts. 
class LiteGraphExecutor { public: @@ -100,6 +100,6 @@ class LiteGraphExecutor { MSKernelCallBack after_; uint64_t sharable_handle_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_EXTENDRT_SESSION_LITE_GRAPH_EXECUTOR_H_ diff --git a/mindspore-lite/src/extendrt/session/type.h b/mindspore-lite/src/extendrt/session/type.h index 076fa892..2884f725 100644 --- a/mindspore-lite/src/extendrt/session/type.h +++ b/mindspore-lite/src/extendrt/session/type.h @@ -19,7 +19,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { enum SessionType { kDefaultSession = 0, kSingleOpSession, @@ -29,5 +29,5 @@ enum SessionType { kMemoryOffloadSession, kNoneSession }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_EXTENDRT_SESSION_TYPE_H_ diff --git a/mindspore-lite/src/extendrt/signal_handler.cc b/mindspore-lite/src/extendrt/signal_handler.cc index f1216507..10e24c74 100644 --- a/mindspore-lite/src/extendrt/signal_handler.cc +++ b/mindspore-lite/src/extendrt/signal_handler.cc @@ -21,7 +21,7 @@ #include #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace { static std::map kSigs = { {SIGSEGV, "SIGSEGV"}, {SIGABRT, "SIGABRT"}, {SIGFPE, "SIGFPE"}, {SIGBUS, "SIGBUS"}, {SIGILL, "SIGILL"}}; @@ -77,4 +77,4 @@ void CaptureSignal() { } capture_already = true; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/signal_handler.h b/mindspore-lite/src/extendrt/signal_handler.h index 9b309da3..61bdb819 100644 --- a/mindspore-lite/src/extendrt/signal_handler.h +++ b/mindspore-lite/src/extendrt/signal_handler.h @@ -17,9 +17,9 @@ #ifndef MINDSPORE_LITE_SRC_EXTENDRT_SIGNAL_HANDLER_H_ #define MINDSPORE_LITE_SRC_EXTENDRT_SIGNAL_HANDLER_H_ -namespace mindspore { +namespace mindspore::lite { // capture exception signals void CaptureSignal(); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_SIGNAL_HANDLER_H_ diff --git a/mindspore-lite/src/extendrt/subgraph_kernel.cc b/mindspore-lite/src/extendrt/subgraph_kernel.cc index d077753f..9a9d8a04 100644 --- a/mindspore-lite/src/extendrt/subgraph_kernel.cc +++ b/mindspore-lite/src/extendrt/subgraph_kernel.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ #include "src/extendrt/subgraph_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { bool SubgraphKernel::Launch(const std::vector &inputs, const std::vector &workspace, const std::vector &outputs, void *stream_ptr) { std::vector in; @@ -29,4 +29,4 @@ bool SubgraphKernel::Init(const std::vector &inputs, const std::vect } int SubgraphKernel::Resize(const std::vector &inputs, const std::vector &outputs) { return 0; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/extendrt/subgraph_kernel.h b/mindspore-lite/src/extendrt/subgraph_kernel.h index 769e4040..a32c4837 100644 --- a/mindspore-lite/src/extendrt/subgraph_kernel.h +++ b/mindspore-lite/src/extendrt/subgraph_kernel.h @@ -25,7 +25,7 @@ #include "runtime/hardware_abstract/kernel_base/common_utils.h" #include "src/extendrt/session/lite_graph_executor.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SubgraphKernel { public: SubgraphKernel(FuncGraphPtr subgraph, std::shared_ptr executor) @@ -43,5 +43,5 @@ class SubgraphKernel { FuncGraphPtr subgraph_; std::shared_ptr executor_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/extendrt/utils/func_graph_utils.cc b/mindspore-lite/src/extendrt/utils/func_graph_utils.cc index 931ae1d3..9dcfcdc0 100644 --- a/mindspore-lite/src/extendrt/utils/func_graph_utils.cc +++ b/mindspore-lite/src/extendrt/utils/func_graph_utils.cc @@ -37,7 +37,7 @@ #include "mindspore/core/include/ir/func_graph_flag.h" #include "ir/tensor_new.h" -namespace mindspore { +namespace mindspore::lite { const PrimitivePtr kPrimMakeTupleV2 = std::make_shared("make_tuple"); ValuePtr FuncGraphUtils::GetNodeValuePtr(AnfNodePtr input_node) { if (input_node == nullptr) { @@ -555,4 +555,4 @@ AnfNodePtr FuncGraphUtils::RefSubGraphNode(const FuncGraphPtr &fg, const AnfNode } return eqv[node]; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/utils/func_graph_utils.h b/mindspore-lite/src/extendrt/utils/func_graph_utils.h index a15ecd71..18efbe37 100644 --- a/mindspore-lite/src/extendrt/utils/func_graph_utils.h +++ b/mindspore-lite/src/extendrt/utils/func_graph_utils.h @@ -33,7 +33,7 @@ #include "runtime/hardware_abstract/kernel_base/kernel.h" #include "include/common/utils/anfalgo.h" -namespace mindspore { +namespace mindspore::lite { using AnfWithOutIndex = std::pair; using kernel::BaseOperatorPtr; @@ -76,6 +76,6 @@ class FuncGraphUtils { private: static ValuePtr GetNodeValuePtr(AnfNodePtr input_node); }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_UTILS_FUNC_GRAPH_UTILS_H_ diff --git a/mindspore-lite/src/extendrt/utils/segment_utils.h b/mindspore-lite/src/extendrt/utils/segment_utils.h index af4925c8..0910e370 100644 --- a/mindspore-lite/src/extendrt/utils/segment_utils.h +++ b/mindspore-lite/src/extendrt/utils/segment_utils.h @@ -27,10 +27,10 @@ #include "utils/hash_map.h" #include "ir/anf.h" -namespace mindspore { +namespace mindspore::lite { namespace infer { std::tuple TransformSegmentToAnfGraph(const AnfNodePtrList &lst); } // namespace infer -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_VM_SEGMENT_UTILS_H_ diff --git a/mindspore-lite/src/extendrt/utils/serialization.cc b/mindspore-lite/src/extendrt/utils/serialization.cc index 01521fd4..752c7760 100644 --- a/mindspore-lite/src/extendrt/utils/serialization.cc +++ 
b/mindspore-lite/src/extendrt/utils/serialization.cc @@ -27,7 +27,7 @@ #include "include/api/types.h" #include "src/common/crypto.h" -namespace mindspore::infer { +namespace mindspore::lite::infer { static mindspore::Status RealPath(const std::string &file, std::string *realpath_str) { MS_EXCEPTION_IF_NULL(realpath_str); char real_path_mem[PATH_MAX] = {0}; @@ -151,4 +151,4 @@ mindspore::Status Serialization::Load(const void *model_data, size_t data_size, MS_LOG(ERROR) << err_msg.str(); return mindspore::Status(kMEInvalidInput, err_msg.str()); } -} // namespace mindspore::infer +} // namespace mindspore::lite::infer diff --git a/mindspore-lite/src/extendrt/utils/serialization.h b/mindspore-lite/src/extendrt/utils/serialization.h index 5af05794..7b339850 100644 --- a/mindspore-lite/src/extendrt/utils/serialization.h +++ b/mindspore-lite/src/extendrt/utils/serialization.h @@ -27,7 +27,7 @@ #include "include/api/dual_abi_helper.h" #include "base/base.h" -namespace mindspore::infer { +namespace mindspore::lite::infer { class Serialization { public: static mindspore::Status Load(const void *model_data, size_t data_size, mindspore::ModelType model_type, @@ -38,5 +38,5 @@ class Serialization { static mindspore::FuncGraphPtr ConvertStreamToFuncGraph(const char *buf, const size_t buf_size, bool is_lite, const std::string &mindir_path = ""); }; -} // namespace mindspore::infer +} // namespace mindspore::lite::infer #endif // MINDSPORE_LITE_SRD_EXTENDRT_UTILS_SERIALIZATION_H_ diff --git a/mindspore-lite/src/extendrt/utils/tensor_default_impl.h b/mindspore-lite/src/extendrt/utils/tensor_default_impl.h index 238b3fd0..12894ac5 100644 --- a/mindspore-lite/src/extendrt/utils/tensor_default_impl.h +++ b/mindspore-lite/src/extendrt/utils/tensor_default_impl.h @@ -32,9 +32,8 @@ #include "common/mutable_tensor_impl.h" #include "src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.h" -namespace mindspore { +namespace mindspore::lite { #define CHECK_SIZE_MUL_OVERFLOW(x, y) (((x) == 0) ? 
false : (SIZE_MAX / (x)) < (y)) - class TensorDefaultImpl : public MutableTensorImpl { public: TensorDefaultImpl() = default; @@ -216,6 +215,6 @@ class TensorDefaultImpl : public MutableTensorImpl { } } }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_UTILS_TENSOR_DEFAULT_IMPL_H_ diff --git a/mindspore-lite/src/extendrt/utils/tensor_utils.cc b/mindspore-lite/src/extendrt/utils/tensor_utils.cc index edaf6321..bbfe7843 100644 --- a/mindspore-lite/src/extendrt/utils/tensor_utils.cc +++ b/mindspore-lite/src/extendrt/utils/tensor_utils.cc @@ -22,7 +22,7 @@ #include "extendrt/utils/tensor_utils.h" -namespace mindspore { +namespace mindspore::lite { kernel::AddressPtr CloudTensorUtils::LiteTensorToAddressPtr(const lite::Tensor *lite_tensor) { kernel::AddressPtr address_ptr = std::make_shared(lite_tensor->data(), lite_tensor->Size()); return address_ptr; @@ -73,4 +73,4 @@ std::vector CloudTensorUtils::LiteTensorToKernelTensorPt return kernel_tensor_list; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/extendrt/utils/tensor_utils.h b/mindspore-lite/src/extendrt/utils/tensor_utils.h index b1c98d50..f8d3404d 100644 --- a/mindspore-lite/src/extendrt/utils/tensor_utils.h +++ b/mindspore-lite/src/extendrt/utils/tensor_utils.h @@ -35,7 +35,7 @@ #ifdef ENABLE_CLOUD_INFERENCE #include "src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.h" #endif -namespace mindspore { +namespace mindspore::lite { class CloudTensorUtils { public: /* lite tensor ---> Address */ @@ -48,6 +48,6 @@ class CloudTensorUtils { static std::vector LiteTensorToKernelTensorPtrVec( const std::vector &lite_tensors); }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_UTILS_TENSOR_UTILS_H_ diff --git a/mindspore-lite/src/infer/context.h b/mindspore-lite/src/infer/context.h index 9e88b0d1..9b7b7851 100644 --- a/mindspore-lite/src/infer/context.h +++ b/mindspore-lite/src/infer/context.h @@ -20,12 +20,12 @@ #include "litert/inner_context.h" -namespace mindspore { +namespace mindspore::lite { namespace infer::abstract { using Context = mindspore::lite::InnerContext; } using InferContext = infer::abstract::Context; using InferContextPtr = std::shared_ptr; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INFER_CONTEXT_H_ diff --git a/mindspore-lite/src/infer/kernel.h b/mindspore-lite/src/infer/kernel.h index 5d9e5721..174bd41b 100644 --- a/mindspore-lite/src/infer/kernel.h +++ b/mindspore-lite/src/infer/kernel.h @@ -22,11 +22,11 @@ #include "infer/tensor.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { namespace infer::abstract { using Kernel = mindspore::kernel::KernelExec; } using InferKernel = infer::abstract::Kernel; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INFER_KERNEL_H_ diff --git a/mindspore-lite/src/infer/primitive_type.cc b/mindspore-lite/src/infer/primitive_type.cc index 9eaaf430..59b68992 100644 --- a/mindspore-lite/src/infer/primitive_type.cc +++ b/mindspore-lite/src/infer/primitive_type.cc @@ -17,7 +17,7 @@ #include "src/infer/primitive_type.h" #include "nnacl_c/op_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { #ifdef ENABLE_CLOUD_INFERENCE namespace { class PrimitiveTypeHelper { @@ -116,4 +116,4 @@ schema::PrimitiveType PrimitiveType::SchemaType() const { return static_cast(this->flatbuffers_type_); } #endif -} // namespace 
mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/infer/tensor.h b/mindspore-lite/src/infer/tensor.h index d0a85fac..9b698ede 100644 --- a/mindspore-lite/src/infer/tensor.h +++ b/mindspore-lite/src/infer/tensor.h @@ -21,12 +21,12 @@ #include #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { namespace infer::abstract { using Tensor = mindspore::lite::Tensor; } using InferTensor = infer::abstract::Tensor; using InferTensorPtr = std::shared_ptr; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_INFER_TENSOR_H_ diff --git a/mindspore-lite/src/litert/allocator.cc b/mindspore-lite/src/litert/allocator.cc index 32eda3bf..c50cf778 100644 --- a/mindspore-lite/src/litert/allocator.cc +++ b/mindspore-lite/src/litert/allocator.cc @@ -15,6 +15,6 @@ */ #include "src/litert/inner_allocator.h" -namespace mindspore { +namespace mindspore::lite { std::shared_ptr Allocator::Create() { return std::make_shared(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/c_api/model_c.cc b/mindspore-lite/src/litert/c_api/model_c.cc index b27b2dde..aba29fe3 100644 --- a/mindspore-lite/src/litert/c_api/model_c.cc +++ b/mindspore-lite/src/litert/c_api/model_c.cc @@ -21,7 +21,7 @@ #include "include/api/types.h" #include "src/litert/cxx_api/tensor/tensor_impl.h" -namespace mindspore { +namespace mindspore::lite { class ModelC { public: ModelC() : model_(nullptr) {} @@ -139,7 +139,7 @@ mindspore::MSKernelCallBack ModelC::TransCallBack(const MSKernelCallBackC &ms_ca } return call_back; } -} // namespace mindspore +} // namespace mindspore::lite MSModelHandle MSModelCreate() { auto impl = new (std::nothrow) mindspore::ModelC(); diff --git a/mindspore-lite/src/litert/cxx_api/callback/callback_adapter.h b/mindspore-lite/src/litert/cxx_api/callback/callback_adapter.h index f3be8c47..c6f566cc 100644 --- a/mindspore-lite/src/litert/cxx_api/callback/callback_adapter.h +++ b/mindspore-lite/src/litert/cxx_api/callback/callback_adapter.h @@ -20,7 +20,7 @@ #include "include/api/model.h" #include "include/train/train_loop_callback.h" -namespace mindspore { +namespace mindspore::lite { class TrainLoopCallBackAdapter : public lite::TrainLoopCallBack { public: @@ -55,6 +55,6 @@ class TrainLoopCallBackAdapter : public lite::TrainLoopCallBack { Model *model_; TrainCallBack *call_back_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_CALLBACK_CALLBACK_ADAPTER_H_ diff --git a/mindspore-lite/src/litert/cxx_api/callback/callback_impl.h b/mindspore-lite/src/litert/cxx_api/callback/callback_impl.h index 956f8cde..d44bb4c5 100644 --- a/mindspore-lite/src/litert/cxx_api/callback/callback_impl.h +++ b/mindspore-lite/src/litert/cxx_api/callback/callback_impl.h @@ -19,7 +19,7 @@ #include "include/train/train_loop_callback.h" -namespace mindspore { +namespace mindspore::lite { class CallbackImpl { public: @@ -30,6 +30,6 @@ class CallbackImpl { protected: lite::TrainLoopCallBack *internal_call_back_ = nullptr; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_CALLBACK_CALLBACK_IMPL_H_ diff --git a/mindspore-lite/src/litert/cxx_api/callback/ckpt_saver.cc b/mindspore-lite/src/litert/cxx_api/callback/ckpt_saver.cc index 6ed26cf5..bc756c18 100644 --- a/mindspore-lite/src/litert/cxx_api/callback/ckpt_saver.cc +++ b/mindspore-lite/src/litert/cxx_api/callback/ckpt_saver.cc @@ -22,7 +22,7 @@ #include 
"src/litert/cxx_api/callback/callback_impl.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { CkptSaver::CkptSaver(int save_every_n, const std::vector &filename_prefix) { callback_impl_ = new (std::nothrow) CallbackImpl(new (std::nothrow) lite::CkptSaver(save_every_n, CharToString(filename_prefix))); @@ -43,4 +43,4 @@ CkptSaver::~CkptSaver() { delete callback_impl_; callback_impl_ = nullptr; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/callback/loss_monitor.cc b/mindspore-lite/src/litert/cxx_api/callback/loss_monitor.cc index 6094143b..55cb4adf 100644 --- a/mindspore-lite/src/litert/cxx_api/callback/loss_monitor.cc +++ b/mindspore-lite/src/litert/cxx_api/callback/loss_monitor.cc @@ -22,7 +22,7 @@ #include "src/litert/cxx_api/callback/callback_impl.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { LossMonitor::LossMonitor(int print_every_n_steps) { callback_impl_ = new (std::nothrow) CallbackImpl(new (std::nothrow) lite::LossMonitor(print_every_n_steps)); if (callback_impl_ == nullptr) { @@ -58,4 +58,4 @@ const std::vector &LossMonitor::GetLossPoints() { return (reinterpret_cast(internal_call_back))->GetLossPoints(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/callback/lr_scheduler.cc b/mindspore-lite/src/litert/cxx_api/callback/lr_scheduler.cc index 527f1603..5c86e264 100644 --- a/mindspore-lite/src/litert/cxx_api/callback/lr_scheduler.cc +++ b/mindspore-lite/src/litert/cxx_api/callback/lr_scheduler.cc @@ -22,7 +22,7 @@ #include "src/litert/cxx_api/callback/callback_impl.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { int StepLRLambda(float *lr, int epoch, void *lr_cb_data) { if ((lr == nullptr) || (lr_cb_data == nullptr)) { MS_LOG(ERROR) << "nullptr passed as input to MultiplicativeLRLambda"; @@ -59,4 +59,4 @@ LRScheduler::~LRScheduler() { delete callback_impl_; callback_impl_ = nullptr; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/callback/train_accuracy.cc b/mindspore-lite/src/litert/cxx_api/callback/train_accuracy.cc index 7081d04c..db279bf8 100644 --- a/mindspore-lite/src/litert/cxx_api/callback/train_accuracy.cc +++ b/mindspore-lite/src/litert/cxx_api/callback/train_accuracy.cc @@ -22,7 +22,7 @@ #include "src/litert/cxx_api/callback/callback_impl.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { TrainAccuracy::TrainAccuracy(int print_every_n, int accuracy_metrics, const std::vector &input_indexes, const std::vector &output_indexes) { callback_impl_ = new (std::nothrow) CallbackImpl(new (std::nothrow) lite::ClassificationTrainAccuracyMonitor( @@ -60,4 +60,4 @@ const std::vector &TrainAccuracy::GetAccuracyPoints() { return (reinterpret_cast(internal_call_back))->GetAccuracyPoints(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/cell.cc b/mindspore-lite/src/litert/cxx_api/cell.cc index d392a70e..0363f5fb 100644 --- a/mindspore-lite/src/litert/cxx_api/cell.cc +++ b/mindspore-lite/src/litert/cxx_api/cell.cc @@ -17,7 +17,7 @@ #include "include/api/cell.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { std::vector CellBase::operator()(const std::vector &inputs) const { std::vector empty; MS_LOG(ERROR) << "Unsupported feature."; @@ -58,4 +58,4 @@ 
InputAndOutput::InputAndOutput(const std::shared_ptr &cell, const std: int32_t index) { MS_LOG(ERROR) << "Unsupported feature."; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/context.cc b/mindspore-lite/src/litert/cxx_api/context.cc index 23673c5d..4f585453 100644 --- a/mindspore-lite/src/litert/cxx_api/context.cc +++ b/mindspore-lite/src/litert/cxx_api/context.cc @@ -22,7 +22,7 @@ #include "src/litert/inner_allocator.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { constexpr auto kModelOptionCpuEnableFP16 = "mindspore.option.cpu.enable_fp16"; constexpr auto kModelOptionGPUEnableFP16 = "mindspore.option.gpu.enable_fp16"; constexpr auto kModelOptionNPUEnableFP16 = "mindspore.option.npu.enable_fp16"; @@ -683,4 +683,4 @@ std::vector AscendDeviceInfo::GetBufferOptimizeModeChar() const { const std::string &ref = GetValue(data_, kModelOptionAscendBufferOptimize); return StringToChar(ref); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/context.h b/mindspore-lite/src/litert/cxx_api/context.h index 0923d1a4..beaed580 100644 --- a/mindspore-lite/src/litert/cxx_api/context.h +++ b/mindspore-lite/src/litert/cxx_api/context.h @@ -29,7 +29,7 @@ #include "include/api/context.h" #include "include/api/delegate.h" -namespace mindspore { +namespace mindspore::lite { struct Context::Data { std::vector> device_info_list; int affinity_mode_ = 0; @@ -51,6 +51,6 @@ struct DeviceInfoContext::Data { #endif std::shared_ptr allocator = nullptr; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_CONTEXT_H_ diff --git a/mindspore-lite/src/litert/cxx_api/converters.cc b/mindspore-lite/src/litert/cxx_api/converters.cc index 8daf0242..2445091d 100644 --- a/mindspore-lite/src/litert/cxx_api/converters.cc +++ b/mindspore-lite/src/litert/cxx_api/converters.cc @@ -17,7 +17,7 @@ #include "src/common/log_adapter.h" #include "src/common/utils.h" -namespace mindspore { +namespace mindspore::lite { constexpr static int kMaxNumOfDevices = 3; constexpr static int kDefaultThreadNumTwo = 2; constexpr static int kDefaultThreadNumFour = 4; @@ -171,4 +171,4 @@ std::shared_ptr ContextUtils::Convert(Context *context) { } return inner_context; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/converters.h b/mindspore-lite/src/litert/cxx_api/converters.h index d5103002..1595e108 100644 --- a/mindspore-lite/src/litert/cxx_api/converters.h +++ b/mindspore-lite/src/litert/cxx_api/converters.h @@ -26,7 +26,7 @@ #include "src/litert/inner_context.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { class MS_API ContextUtils { public: static std::shared_ptr Convert(Context *context); @@ -66,6 +66,6 @@ inline lite::QuantizationType A2L_ConvertQT(mindspore::QuantizationType qt) { } Status A2L_ConvertConfig(const TrainCfg *a_train_cfg, lite::TrainCfg *l_train_cfg); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_CONVERTERS_H_ diff --git a/mindspore-lite/src/litert/cxx_api/graph/graph.cc b/mindspore-lite/src/litert/cxx_api/graph/graph.cc index 5c1567cc..71b11fda 100644 --- a/mindspore-lite/src/litert/cxx_api/graph/graph.cc +++ b/mindspore-lite/src/litert/cxx_api/graph/graph.cc @@ -18,7 +18,7 @@ #include "include/api/cell.h" #include "src/litert/cxx_api/graph/graph_data.h" -namespace 
mindspore { +namespace mindspore::lite { Graph::Graph() : graph_data_(nullptr) {} Graph::Graph(const std::shared_ptr &graph_data) : graph_data_(graph_data) {} @@ -34,4 +34,4 @@ bool Graph::operator==(std::nullptr_t) const { return graph_data_ == nullptr; } bool Graph::operator!=(std::nullptr_t) const { return graph_data_ != nullptr; } ModelType Graph::ModelType() const { return kMindIR_Lite; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/graph/graph_data.h b/mindspore-lite/src/litert/cxx_api/graph/graph_data.h index 040a90c9..8b2b150a 100644 --- a/mindspore-lite/src/litert/cxx_api/graph/graph_data.h +++ b/mindspore-lite/src/litert/cxx_api/graph/graph_data.h @@ -25,7 +25,7 @@ #include "include/api/types.h" #include "src/litert/lite_model.h" -namespace mindspore { +namespace mindspore::lite { class Graph::GraphData { public: GraphData() : lite_model_(nullptr) {} @@ -42,6 +42,6 @@ class Graph::GraphData { std::shared_ptr lite_model_ = nullptr; std::string file_name_ = ""; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_GRAPH_GRAPH_DATA_H_ diff --git a/mindspore-lite/src/litert/cxx_api/kernel.cc b/mindspore-lite/src/litert/cxx_api/kernel.cc index 9d321b24..8068a841 100644 --- a/mindspore-lite/src/litert/cxx_api/kernel.cc +++ b/mindspore-lite/src/litert/cxx_api/kernel.cc @@ -1,71 +1,71 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "include/api/kernel.h" -#include "include/errorcode.h" -#include "src/registry/kernel_interface_registry.h" -#include "src/common/log_adapter.h" - -namespace mindspore::kernel { -void Kernel::Initialize() { - if (primitive_ == nullptr) { - return; - } - type_ = primitive_->value_type(); - if (type_ == schema::PrimitiveType_Custom) { - auto param = primitive_->value_as_Custom(); - if (param != nullptr && param->type() != nullptr) { - SetAttr("type", param->type()->str()); - } - } -} - -int Kernel::InferShape() { -#ifndef CUSTOM_KERNEL_REGISTRY_CLIP - std::shared_ptr kernel_interface = nullptr; - if (type() == schema::PrimitiveType_Custom) { - kernel_interface = registry::KernelInterfaceRegistry::Instance()->GetKernelInterface("", nullptr, this); - } else { - auto device_list = const_cast(context_)->MutableDeviceInfo(); - for (auto &device : device_list) { - MS_CHECK_TRUE_RET(device != nullptr, lite::RET_NULL_PTR); - kernel_interface = - registry::KernelInterfaceRegistry::Instance()->GetKernelInterface(device->GetProvider(), nullptr, this); - if (kernel_interface != nullptr) { - break; - } - } - } - - if (kernel_interface == nullptr) { - MS_LOG(ERROR) << "op_type: " << schema::EnumNamePrimitiveType(type_) << " can not find infer interface."; - return lite::RET_NOT_SUPPORT; - } - auto ret = kernel_interface->Infer(&inputs_, &outputs_, static_cast(primitive_), this); - if (ret == kLiteInferInvalid) { - for (auto output : outputs_) { - output.SetShape({-1}); - } - return lite::RET_INFER_INVALID; - } - if (ret != kSuccess) { - MS_LOG(ERROR) << "op_type: " << schema::EnumNamePrimitiveType(type_) << " infer fail!ret: " << ret; - return lite::RET_ERROR; - } - return lite::RET_OK; -#endif - return lite::RET_NOT_SUPPORT; -} -} // namespace mindspore::kernel +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "include/api/kernel.h" +#include "include/errorcode.h" +#include "src/registry/kernel_interface_registry.h" +#include "src/common/log_adapter.h" + +namespace mindspore::lite::kernel { +void Kernel::Initialize() { + if (primitive_ == nullptr) { + return; + } + type_ = primitive_->value_type(); + if (type_ == schema::PrimitiveType_Custom) { + auto param = primitive_->value_as_Custom(); + if (param != nullptr && param->type() != nullptr) { + SetAttr("type", param->type()->str()); + } + } +} + +int Kernel::InferShape() { +#ifndef CUSTOM_KERNEL_REGISTRY_CLIP + std::shared_ptr kernel_interface = nullptr; + if (type() == schema::PrimitiveType_Custom) { + kernel_interface = registry::KernelInterfaceRegistry::Instance()->GetKernelInterface("", nullptr, this); + } else { + auto device_list = const_cast(context_)->MutableDeviceInfo(); + for (auto &device : device_list) { + MS_CHECK_TRUE_RET(device != nullptr, lite::RET_NULL_PTR); + kernel_interface = + registry::KernelInterfaceRegistry::Instance()->GetKernelInterface(device->GetProvider(), nullptr, this); + if (kernel_interface != nullptr) { + break; + } + } + } + + if (kernel_interface == nullptr) { + MS_LOG(ERROR) << "op_type: " << schema::EnumNamePrimitiveType(type_) << " can not find infer interface."; + return lite::RET_NOT_SUPPORT; + } + auto ret = kernel_interface->Infer(&inputs_, &outputs_, static_cast(primitive_), this); + if (ret == kLiteInferInvalid) { + for (auto output : outputs_) { + output.SetShape({-1}); + } + return lite::RET_INFER_INVALID; + } + if (ret != kSuccess) { + MS_LOG(ERROR) << "op_type: " << schema::EnumNamePrimitiveType(type_) << " infer fail!ret: " << ret; + return lite::RET_ERROR; + } + return lite::RET_OK; +#endif + return lite::RET_NOT_SUPPORT; +} +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/cxx_api/kernel_executor/custom_om_infer.cc b/mindspore-lite/src/litert/cxx_api/kernel_executor/custom_om_infer.cc index 14f49ac9..202f3afd 100644 --- a/mindspore-lite/src/litert/cxx_api/kernel_executor/custom_om_infer.cc +++ b/mindspore-lite/src/litert/cxx_api/kernel_executor/custom_om_infer.cc @@ -17,7 +17,7 @@ #include "include/api/status.h" #include "include/registry/register_kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { class CustomOMInfer : public kernel::KernelInterface { public: CustomOMInfer() = default; @@ -30,4 +30,4 @@ class CustomOMInfer : public kernel::KernelInterface { }; std::shared_ptr CustomOMInferCreator() { return std::make_shared(); } REGISTER_CUSTOM_KERNEL_INTERFACE(Tutorial, Custom_OM, CustomOMInferCreator) -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/kernel_executor/custom_om_kernel.cc b/mindspore-lite/src/litert/cxx_api/kernel_executor/custom_om_kernel.cc index fddfb998..e4f1ab5d 100644 --- a/mindspore-lite/src/litert/cxx_api/kernel_executor/custom_om_kernel.cc +++ b/mindspore-lite/src/litert/cxx_api/kernel_executor/custom_om_kernel.cc @@ -24,7 +24,7 @@ #include "include/securec.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { const auto kFloat32 = DataType::kNumberTypeFloat32; const int MODEL_MAX_RUN_TIME_MS = 100; @@ -194,4 +194,4 @@ std::shared_ptr CustomOMKernelCreator(const std::vector &input REGISTER_CUSTOM_KERNEL(NPU, Tutorial, kFloat32, Custom_OM, CustomOMKernelCreator) REGISTER_CUSTOM_KERNEL(CPU, Tutorial, kFloat32, Custom_OM, CustomOMKernelCreator) } // namespace kernel -} // namespace mindspore +} // 
namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/kernel_executor/custom_om_kernel.h b/mindspore-lite/src/litert/cxx_api/kernel_executor/custom_om_kernel.h index 5bfab632..b4c979bb 100644 --- a/mindspore-lite/src/litert/cxx_api/kernel_executor/custom_om_kernel.h +++ b/mindspore-lite/src/litert/cxx_api/kernel_executor/custom_om_kernel.h @@ -24,7 +24,7 @@ #include "model_manager/model_manager.h" #include "model/built_model.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class CustomOMKernel : public Kernel { public: @@ -53,5 +53,5 @@ class CustomOMKernel : public Kernel { std::vector> hiai_outputs_{}; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_KERNEL_EXECUTOR_CUSTOM_OM_KERNEL_H diff --git a/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor.cc b/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor.cc index 6524b568..e3c60433 100644 --- a/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor.cc +++ b/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor.cc @@ -17,7 +17,7 @@ #include "src/litert/cxx_api/kernel_executor/kernel_executor.h" #include "src/litert/cxx_api/kernel_executor/kernel_executor_impl.h" -namespace mindspore { +namespace mindspore::lite { Status KernelExecutor::Build(const std::shared_ptr &op, const std::vector &inputs, const std::shared_ptr &ms_context) { if (impl_ == nullptr) { @@ -59,4 +59,4 @@ Status KernelExecutor::Execute(const std::vector &inputs, std::vector< } return impl_->Execute(inputs, outputs); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor.h b/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor.h index 8a2cde46..35545ae7 100644 --- a/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor.h +++ b/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor.h @@ -25,7 +25,7 @@ #include "ops/base_operator.h" #include "infer/custom.h" -namespace mindspore { +namespace mindspore::lite { class KernelExecutorImpl; class MS_API KernelExecutor { @@ -79,5 +79,5 @@ class MS_API KernelExecutor { private: std::shared_ptr impl_ = nullptr; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_KERNEL_EXECUTOR_KERNEL_EXECUTOR_H_ diff --git a/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor_impl.cc b/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor_impl.cc index e063cfc6..2fc36f00 100644 --- a/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor_impl.cc +++ b/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor_impl.cc @@ -30,7 +30,7 @@ #include "src/litert/cxx_api/kernel_executor/op_converter.h" #include "src/litert/cpu_info.h" -namespace mindspore { +namespace mindspore::lite { namespace { std::unordered_set support_ops = { "Abs", "ReLU", "Sigmoid", "Add", "Argmax", "Argmin", "AvgPool", @@ -404,4 +404,4 @@ bool KernelExecutorImpl::TensorIsValid(const MSTensor &ms_tensor, const lite::Te } return true; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor_impl.h b/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor_impl.h index 3e36e3c0..ad275501 100644 --- a/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor_impl.h +++ 
b/mindspore-lite/src/litert/cxx_api/kernel_executor/kernel_executor_impl.h @@ -23,7 +23,7 @@ #include "src/executor/kernel_exec.h" #include "common/version_manager.h" -namespace mindspore { +namespace mindspore::lite { class KernelExecutorImpl { public: KernelExecutorImpl(); @@ -59,5 +59,5 @@ class KernelExecutorImpl { std::shared_ptr fbb_; bool support_fp16_ = false; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_KERNEL_EXECUTOR_KERNEL_EXECUTOR_IMPL_H_ diff --git a/mindspore-lite/src/litert/cxx_api/metrics/accuracy.cc b/mindspore-lite/src/litert/cxx_api/metrics/accuracy.cc index d24d880b..fcac7609 100644 --- a/mindspore-lite/src/litert/cxx_api/metrics/accuracy.cc +++ b/mindspore-lite/src/litert/cxx_api/metrics/accuracy.cc @@ -22,7 +22,7 @@ #include "src/litert/cxx_api/metrics/metrics_impl.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { AccuracyMetrics::AccuracyMetrics(int accuracy_metrics, const std::vector &input_indexes, const std::vector &output_indexes) { metrics_impl_ = new (std::nothrow) @@ -62,4 +62,4 @@ float AccuracyMetrics::Eval() { auto internal_metrics = metrics_impl_->GetInternalMetrics(); return (reinterpret_cast(internal_metrics))->Eval(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/metrics/metrics_adapter.h b/mindspore-lite/src/litert/cxx_api/metrics/metrics_adapter.h index 63e00ff2..e2608312 100644 --- a/mindspore-lite/src/litert/cxx_api/metrics/metrics_adapter.h +++ b/mindspore-lite/src/litert/cxx_api/metrics/metrics_adapter.h @@ -20,7 +20,7 @@ #include #include "include/train/metrics.h" -namespace mindspore { +namespace mindspore::lite { class MetricsAdapter : public session::Metrics { public: @@ -35,6 +35,6 @@ class MetricsAdapter : public session::Metrics { private: mindspore::Metrics *metrics_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_METRICS_METRICS_ADAPTER_H_ diff --git a/mindspore-lite/src/litert/cxx_api/metrics/metrics_impl.h b/mindspore-lite/src/litert/cxx_api/metrics/metrics_impl.h index 6c984a7f..c8bcd611 100644 --- a/mindspore-lite/src/litert/cxx_api/metrics/metrics_impl.h +++ b/mindspore-lite/src/litert/cxx_api/metrics/metrics_impl.h @@ -27,7 +27,7 @@ #include "include/api/model.h" #include "include/train/metrics.h" -namespace mindspore { +namespace mindspore::lite { class MetricsImpl { public: @@ -38,6 +38,6 @@ class MetricsImpl { protected: session::Metrics *metrics_ = nullptr; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_METRICS_METRICS_IMPL_H_ diff --git a/mindspore-lite/src/litert/cxx_api/model/model.cc b/mindspore-lite/src/litert/cxx_api/model/model.cc index ea5aed46..4072d8a1 100644 --- a/mindspore-lite/src/litert/cxx_api/model/model.cc +++ b/mindspore-lite/src/litert/cxx_api/model/model.cc @@ -38,7 +38,7 @@ #include "src/common/file_utils.h" #endif -namespace mindspore { +namespace mindspore::lite { #ifdef USE_GLOG extern "C" { extern void mindspore_log_init(); @@ -623,4 +623,4 @@ Status Model::Finalize() { MS_LOG(INFO) << "Finalize is only support for mindspore_lite's ascend backend."; return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/model/model_group.cc b/mindspore-lite/src/litert/cxx_api/model/model_group.cc old mode 100755 new mode 100644 index 6de883a6..bc2d6b65 --- 
a/mindspore-lite/src/litert/cxx_api/model/model_group.cc +++ b/mindspore-lite/src/litert/cxx_api/model/model_group.cc @@ -22,7 +22,7 @@ #include "src/litert/cxx_api/model/model_group_impl.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { ModelGroup::ModelGroup(ModelGroupFlag flags) { impl_ = std::make_shared(flags); if (impl_ == nullptr) { @@ -58,4 +58,4 @@ Status ModelGroup::CalMaxSizeOfWorkspace(ModelType model_type, const std::shared } return impl_->CalMaxSizeOfWorkspace(model_type, ms_context); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/model/model_group_impl.cc b/mindspore-lite/src/litert/cxx_api/model/model_group_impl.cc old mode 100755 new mode 100644 index e915e57d..89b7165d --- a/mindspore-lite/src/litert/cxx_api/model/model_group_impl.cc +++ b/mindspore-lite/src/litert/cxx_api/model/model_group_impl.cc @@ -29,7 +29,7 @@ #include "src/litert/model_manager.h" #include "src/common/config_file.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::RET_OK; ModelGroupImpl::ModelGroupImpl(ModelGroupFlag flags) : flags_(flags) { static uint32_t g_model_group_id = 0; @@ -144,4 +144,4 @@ Status ModelGroupImpl::CalMaxSizeOfWorkspace(ModelType model_type, const std::sh } return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/model/model_group_impl.h b/mindspore-lite/src/litert/cxx_api/model/model_group_impl.h index d93721df..125d218b 100644 --- a/mindspore-lite/src/litert/cxx_api/model/model_group_impl.h +++ b/mindspore-lite/src/litert/cxx_api/model/model_group_impl.h @@ -29,7 +29,7 @@ #include "src/litert/lite_session.h" #include "src/litert/inner_context.h" -namespace mindspore { +namespace mindspore::lite { class ModelGroupImpl { public: explicit ModelGroupImpl(ModelGroupFlag flags); @@ -48,6 +48,6 @@ class ModelGroupImpl { ModelGroupFlag flags_; uint32_t model_group_id_ = 0; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_CXX_API_MODEL_MODEL_GROUP_IMPL_H_ diff --git a/mindspore-lite/src/litert/cxx_api/model/model_impl.cc b/mindspore-lite/src/litert/cxx_api/model/model_impl.cc index 272c01dd..c92db3e6 100644 --- a/mindspore-lite/src/litert/cxx_api/model/model_impl.cc +++ b/mindspore-lite/src/litert/cxx_api/model/model_impl.cc @@ -39,7 +39,7 @@ #include "src/common/config_file.h" #include "src/litert/cpu_info.h" #include "src/litert/pack_weight_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace { const char *const kExecutionPlan = "execution_plan"; constexpr size_t kMaxSectionNum = 100; @@ -1048,4 +1048,4 @@ int ModelImpl::ModelDeObfuscate() { } return RET_OK; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/model/model_impl.h b/mindspore-lite/src/litert/cxx_api/model/model_impl.h index f8943afc..0daf2e3b 100644 --- a/mindspore-lite/src/litert/cxx_api/model/model_impl.h +++ b/mindspore-lite/src/litert/cxx_api/model/model_impl.h @@ -42,7 +42,7 @@ void clearVectorOfPointers(std::vector *v) { } } -namespace mindspore { +namespace mindspore::lite { typedef std::shared_ptr(CreateTrainSessionProto)(std::shared_ptr graph_data, std::shared_ptr cfg, @@ -143,6 +143,6 @@ class ModelImpl { std::map execution_plan_; std::map> config_info_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_MODEL_MODEL_IMPL_H_ diff --git 
a/mindspore-lite/src/litert/cxx_api/serialization.cc b/mindspore-lite/src/litert/cxx_api/serialization.cc index 1bc33a69..f57fefa5 100644 --- a/mindspore-lite/src/litert/cxx_api/serialization.cc +++ b/mindspore-lite/src/litert/cxx_api/serialization.cc @@ -26,7 +26,7 @@ #include "src/common/log_adapter.h" #include "src/litert/lite_session.h" -namespace mindspore { +namespace mindspore::lite { Key::Key(const char *dec_key, size_t key_len) { len = 0; if (key_len >= max_key_len) { @@ -230,4 +230,4 @@ Status Serialization::ExportWeightsCollaborateWithMicro(const Model &model, Mode return (ret == mindspore::lite::RET_OK) ? kSuccess : kLiteError; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/tensor/tensor_impl.cc b/mindspore-lite/src/litert/cxx_api/tensor/tensor_impl.cc index 933f2de6..be14b882 100644 --- a/mindspore-lite/src/litert/cxx_api/tensor/tensor_impl.cc +++ b/mindspore-lite/src/litert/cxx_api/tensor/tensor_impl.cc @@ -28,7 +28,7 @@ #ifdef ENABLE_CLOUD_INFERENCE #include "src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.h" #endif -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::RET_OK; std::shared_ptr LiteTensorImpl::CreateTensorImpl(const std::string &name, enum DataType type, @@ -152,4 +152,4 @@ std::vector LiteTensorImpl::TensorImplToStrings(const std::shared_p return result; } #endif -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/tensor/tensor_impl.h b/mindspore-lite/src/litert/cxx_api/tensor/tensor_impl.h index fccabd86..3819d3b8 100644 --- a/mindspore-lite/src/litert/cxx_api/tensor/tensor_impl.h +++ b/mindspore-lite/src/litert/cxx_api/tensor/tensor_impl.h @@ -35,7 +35,7 @@ #include "src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.h" #endif -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::RET_OK; class LiteTensorImpl : public MutableTensorImpl { @@ -307,6 +307,6 @@ class LiteTensorImpl : public MutableTensorImpl { bool from_session_ = false; }; using LiteTensorImplPtr = std::shared_ptr; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_TENSOR_TENSOR_IMPL_H_ diff --git a/mindspore-lite/src/litert/cxx_api/tensor_utils.cc b/mindspore-lite/src/litert/cxx_api/tensor_utils.cc index 01db45ea..7dc98d0b 100644 --- a/mindspore-lite/src/litert/cxx_api/tensor_utils.cc +++ b/mindspore-lite/src/litert/cxx_api/tensor_utils.cc @@ -18,7 +18,7 @@ #include "src/common/log_adapter.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { size_t MS_API CalTensorDataSize(const std::vector &shape, enum DataType type) { size_t element_size = lite::DataTypeSize(static_cast(type)); for (size_t i = 0; i < shape.size(); i++) { @@ -85,4 +85,4 @@ std::vector LiteTensorsToMSTensors(const std::vector MS_API TruncateShape(const std::vector &shape, enum TypeId type, size_t data_len, bool verify_size); @@ -36,6 +36,6 @@ Status MS_API LiteTensorToMSTensor(lite::Tensor *srcTensor, MSTensor *dstTensor, std::vector MS_API LiteTensorsToMSTensors(const std::vector &srcTensors, bool fromSession = true); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_CXX_API_TENSOR_UTILS_H_ diff --git a/mindspore-lite/src/litert/cxx_api/train/converters.cc b/mindspore-lite/src/litert/cxx_api/train/converters.cc index 8c49337d..bfe7e4f4 100644 --- a/mindspore-lite/src/litert/cxx_api/train/converters.cc +++ 
b/mindspore-lite/src/litert/cxx_api/train/converters.cc @@ -18,7 +18,7 @@ #include "include/api/cfg.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { Status A2L_ConvertConfig(const TrainCfg *a_train_cfg, lite::TrainCfg *l_train_cfg) { if ((a_train_cfg == nullptr) || (l_train_cfg == nullptr)) { MS_LOG(ERROR) << "Invalid train_cfg pointers"; @@ -34,4 +34,4 @@ Status A2L_ConvertConfig(const TrainCfg *a_train_cfg, lite::TrainCfg *l_train_cf l_train_cfg->accumulate_gradients_ = a_train_cfg->accumulate_gradients_; return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/train/model.cc b/mindspore-lite/src/litert/cxx_api/train/model.cc index 7ac2c3c1..32ab53f7 100644 --- a/mindspore-lite/src/litert/cxx_api/train/model.cc +++ b/mindspore-lite/src/litert/cxx_api/train/model.cc @@ -23,7 +23,7 @@ #include "src/train/train_loop.h" #include "include/train/train_loop_callback.h" -namespace mindspore { +namespace mindspore::lite { Status Model::Train(int epochs, std::shared_ptr ds, std::vector i_cbs) { if ((impl_ == nullptr) || (impl_->session_ == nullptr) || ds == nullptr) { MS_LOG(ERROR) << "Model implement or dataset is null."; @@ -106,4 +106,4 @@ Status Model::Finalize() { MS_LOG(INFO) << "Finalize is only support for mindspore_lite's ascend inference backend."; return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/train/model_build.cc b/mindspore-lite/src/litert/cxx_api/train/model_build.cc index 6ec79777..3df13bb7 100644 --- a/mindspore-lite/src/litert/cxx_api/train/model_build.cc +++ b/mindspore-lite/src/litert/cxx_api/train/model_build.cc @@ -17,7 +17,7 @@ #include "include/api/model.h" #include "src/common/log_adapter.h" #include "src/litert/cxx_api/model/model_impl.h" -namespace mindspore { +namespace mindspore::lite { Status Model::BuildTransferLearning(GraphCell backbone, GraphCell head, const std::shared_ptr &context, const std::shared_ptr &train_cfg) { std::stringstream err_msg; @@ -49,4 +49,4 @@ Status Model::BuildTransferLearning(GraphCell backbone, GraphCell head, const st } return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/train/model_build_impl.cc b/mindspore-lite/src/litert/cxx_api/train/model_build_impl.cc index ef561708..66530e1d 100644 --- a/mindspore-lite/src/litert/cxx_api/train/model_build_impl.cc +++ b/mindspore-lite/src/litert/cxx_api/train/model_build_impl.cc @@ -18,7 +18,7 @@ #include "include/train/train_cfg.h" #include "src/litert/cxx_api/converters.h" #include "src/train/transfer_session.h" -namespace mindspore { +namespace mindspore::lite { Status ModelImpl::BuildTransferLearning(const std::shared_ptr &backbone, const std::shared_ptr &head) { const auto b_graph_data = backbone->graph_data_; const auto h_graph_data = head->graph_data_; @@ -57,4 +57,4 @@ Status ModelImpl::BuildTransferLearning(const std::shared_ptr &backbone, MS_LOG(DEBUG) << "Session is not a train session."; return kLiteError; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/train/model_impl.cc b/mindspore-lite/src/litert/cxx_api/train/model_impl.cc index 726b0585..d02f9529 100644 --- a/mindspore-lite/src/litert/cxx_api/train/model_impl.cc +++ b/mindspore-lite/src/litert/cxx_api/train/model_impl.cc @@ -29,7 +29,7 @@ #include "src/train/train_session.h" #include "src/train/transfer_session.h" 
-namespace mindspore { +namespace mindspore::lite { Status ModelImpl::PrepareMetrics(Model *model, std::vector *out_ms, std::vector *adapter_ms) { if (out_ms == nullptr || adapter_ms == nullptr) { @@ -100,4 +100,4 @@ Status ModelImpl::ConvertCallbacks(Model *model, std::vector *i } return kSuccess; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/train/train_support.cc b/mindspore-lite/src/litert/cxx_api/train/train_support.cc index 13c68a3b..47073b8a 100644 --- a/mindspore-lite/src/litert/cxx_api/train/train_support.cc +++ b/mindspore-lite/src/litert/cxx_api/train/train_support.cc @@ -38,7 +38,7 @@ #include "src/train/train_session.h" #include "src/train/static_allocator.h" -namespace mindspore { +namespace mindspore::lite { std::shared_ptr CreateTrainSession(std::shared_ptr graph_data, std::shared_ptr cfg, const std::shared_ptr &context) { @@ -97,4 +97,4 @@ class TrainSupport { }; TrainSupport support_train_api; -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/cxx_api/types.cc b/mindspore-lite/src/litert/cxx_api/types.cc index dab41944..6f7d192e 100644 --- a/mindspore-lite/src/litert/cxx_api/types.cc +++ b/mindspore-lite/src/litert/cxx_api/types.cc @@ -31,7 +31,7 @@ #include "src/extendrt/delegate/ascend_acl/ascend_allocator_plugin.h" #endif -namespace mindspore { +namespace mindspore::lite { class Buffer::Impl { public: Impl() : data_() {} @@ -735,4 +735,4 @@ std::vector CharVersion() { std::string version = VERSION_STR; return StringToChar("MindSpore Lite " + version); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/delegate/coreml/stub/coreml_delegate_stub.cc b/mindspore-lite/src/litert/delegate/coreml/stub/coreml_delegate_stub.cc index 992dff07..9f1fb351 100644 --- a/mindspore-lite/src/litert/delegate/coreml/stub/coreml_delegate_stub.cc +++ b/mindspore-lite/src/litert/delegate/coreml/stub/coreml_delegate_stub.cc @@ -16,7 +16,7 @@ #include "include/api/delegate.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { Status CoreMLDelegate::Init() { MS_LOG(ERROR) << "Only supported by IOS system and the MSLITE_ENABLE_COREML is turned on"; return kLiteError; @@ -26,4 +26,4 @@ Status CoreMLDelegate::Build(DelegateModel *model) { MS_LOG(ERROR) << "Only supported by IOS system and the MSLITE_ENABLE_COREML is turned on"; return kLiteError; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/delegate/parameter_cache/cache_algorithm.h b/mindspore-lite/src/litert/delegate/parameter_cache/cache_algorithm.h index c496b76b..f64180be 100644 --- a/mindspore-lite/src/litert/delegate/parameter_cache/cache_algorithm.h +++ b/mindspore-lite/src/litert/delegate/parameter_cache/cache_algorithm.h @@ -20,7 +20,7 @@ #include #include "include/api/status.h" -namespace mindspore { +namespace mindspore::lite { namespace cache { struct CacheNoe { CacheNoe(int _index, int _frequency, int _value) : key(_index), frequency(_frequency), value(_value) {} @@ -39,5 +39,5 @@ class CacheAlgorithm { std::vector *need_swap_indies, std::vector *need_swap_indies_cache_index) = 0; }; } // namespace cache -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_PARAMETER_CACHE_CACHE_ALGORITHM_H_ diff --git a/mindspore-lite/src/litert/delegate/parameter_cache/cache_mem_base.h b/mindspore-lite/src/litert/delegate/parameter_cache/cache_mem_base.h 
index 8844e787..240f0763 100644 --- a/mindspore-lite/src/litert/delegate/parameter_cache/cache_mem_base.h +++ b/mindspore-lite/src/litert/delegate/parameter_cache/cache_mem_base.h @@ -19,7 +19,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { namespace cache { class CacheMemBase { public: @@ -37,5 +37,5 @@ class CacheMemBase { size_t cache_vocab_size, size_t embedding_size, size_t swap_in_size) = 0; }; } // namespace cache -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_PARAMETER_CACHE_CACHE_MEM_BASE_H_ diff --git a/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache.cc b/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache.cc index d41485e6..f12169c6 100644 --- a/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache.cc +++ b/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache.cc @@ -29,7 +29,7 @@ namespace { constexpr size_t kEmbeddingTensorShapeSize = 2; } -namespace mindspore { +namespace mindspore::lite { namespace cache { void LookUpTableTask(size_t indices_lens, size_t first_dim_size, const char *input_addr, const int *indices_addr, char *output_addr, size_t embedding_len, int min_host_index) { @@ -234,4 +234,4 @@ Status EmbeddingCache::CheckCacheHit(const int *batch_ids, const size_t batch_id return kSuccess; } } // namespace cache -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache.h b/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache.h index ba3c1d89..3997e7d5 100644 --- a/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache.h +++ b/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache.h @@ -26,7 +26,7 @@ #include "src/litert/delegate/parameter_cache/cache_algorithm.h" #include "src/litert/delegate/parameter_cache/cache_mem_base.h" -namespace mindspore { +namespace mindspore::lite { namespace cache { class EmbeddingCache { public: @@ -89,5 +89,5 @@ class EmbeddingCache { int max_host_index_{0}; }; } // namespace cache -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_PARAMETER_CACHE_EMBEDDING_CACHE_H_ diff --git a/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache_manager.cc b/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache_manager.cc index 3023d1a5..c153ca9c 100644 --- a/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache_manager.cc +++ b/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache_manager.cc @@ -23,7 +23,7 @@ namespace { constexpr size_t kGatherInputsSize = 3; } -namespace mindspore { +namespace mindspore::lite { namespace cache { Status EmbeddingCacheManager::Init(const std::string &cache_model_path, size_t vocab_size, size_t device_cache_size) { if (cache_model_path.empty() || vocab_size == 0 || device_cache_size >= vocab_size) { @@ -191,4 +191,4 @@ int EmbeddingCacheManager::CacheHandle(const std::string &tensor_name, mindspore return lite::RET_OK; } } // namespace cache -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache_manager.h b/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache_manager.h index c811384f..6bf15661 100644 --- a/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache_manager.h +++ b/mindspore-lite/src/litert/delegate/parameter_cache/embedding_cache_manager.h 
@@ -27,7 +27,7 @@ #include "src/litert/delegate/parameter_cache/load_host_cache_model.h" #include "src/litert/delegate/tensorrt/distribution/distribution_base.h" -namespace mindspore { +namespace mindspore::lite { namespace cache { class EmbeddingCacheManager { public: @@ -58,5 +58,5 @@ class EmbeddingCacheManager { size_t device_cache_size_; }; } // namespace cache -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_PARAMETER_CACHE_EMBEDDING_CACHE_MANAGER_H_ diff --git a/mindspore-lite/src/litert/delegate/parameter_cache/gpu/gpu_cache_mem.cc b/mindspore-lite/src/litert/delegate/parameter_cache/gpu/gpu_cache_mem.cc index 4132f922..4fbf30a3 100644 --- a/mindspore-lite/src/litert/delegate/parameter_cache/gpu/gpu_cache_mem.cc +++ b/mindspore-lite/src/litert/delegate/parameter_cache/gpu/gpu_cache_mem.cc @@ -21,7 +21,7 @@ #include "plugin/res_manager/gpu/device/cuda_driver.h" #include "src/common/log_adapter.h" #include "src/litert/delegate/parameter_cache/factory_mgr_base.h" -namespace mindspore { +namespace mindspore::lite { namespace cache { namespace gpu { RET_COMMON_PRODUCT_REGISTRAR(std::string, cache::CacheMemBase, cache::gpu::GPUCacheMem, "gpu", GPUCacheMem); @@ -156,4 +156,4 @@ bool GPUCacheMem::HashSwapIn(void *hash_table_addr, void *swap_in_value_addr, vo } } // namespace gpu } // namespace cache -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/delegate/parameter_cache/gpu/gpu_cache_mem.h b/mindspore-lite/src/litert/delegate/parameter_cache/gpu/gpu_cache_mem.h index 739eba7c..cea39eaf 100644 --- a/mindspore-lite/src/litert/delegate/parameter_cache/gpu/gpu_cache_mem.h +++ b/mindspore-lite/src/litert/delegate/parameter_cache/gpu/gpu_cache_mem.h @@ -21,7 +21,7 @@ #include #include "src/litert/delegate/parameter_cache/cache_mem_base.h" -namespace mindspore { +namespace mindspore::lite { namespace cache { namespace gpu { class GPUCacheMem : public cache::CacheMemBase { @@ -45,5 +45,5 @@ class GPUCacheMem : public cache::CacheMemBase { }; } // namespace gpu } // namespace cache -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_PARAMETER_CACHE_GPU_GPU_CACHE_MEM_H_ diff --git a/mindspore-lite/src/litert/delegate/parameter_cache/lfu_cache.cc b/mindspore-lite/src/litert/delegate/parameter_cache/lfu_cache.cc index b00e4d98..23ee1e61 100644 --- a/mindspore-lite/src/litert/delegate/parameter_cache/lfu_cache.cc +++ b/mindspore-lite/src/litert/delegate/parameter_cache/lfu_cache.cc @@ -18,7 +18,7 @@ #include "src/common/log_adapter.h" #include "src/litert/delegate/parameter_cache/lfu_cache.h" #include "src/litert/delegate/parameter_cache/factory_mgr_base.h" -namespace mindspore { +namespace mindspore::lite { namespace cache { RET_COMMON_PRODUCT_REGISTRAR(std::string, cache::CacheAlgorithm, cache::LFUCacheAlgorithm, "lfu", LFUCacheAlgorithm); @@ -240,4 +240,4 @@ Status LFUCacheAlgorithm::CheckCacheHit(const int *batch_ids, const size_t batch return kSuccess; } } // namespace cache -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/delegate/parameter_cache/lfu_cache.h b/mindspore-lite/src/litert/delegate/parameter_cache/lfu_cache.h index 1e5d0f87..48445168 100644 --- a/mindspore-lite/src/litert/delegate/parameter_cache/lfu_cache.h +++ b/mindspore-lite/src/litert/delegate/parameter_cache/lfu_cache.h @@ -23,7 +23,7 @@ #include #include "include/api/status.h" #include 
"src/litert/delegate/parameter_cache/cache_algorithm.h" -namespace mindspore { +namespace mindspore::lite { namespace cache { class LFUCacheAlgorithm : public CacheAlgorithm { public: @@ -51,5 +51,5 @@ class LFUCacheAlgorithm : public CacheAlgorithm { int max_host_index_{1}; }; } // namespace cache -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_PARAMETER_CACHE_LFU_CACHE_H_ diff --git a/mindspore-lite/src/litert/delegate/parameter_cache/load_host_cache_model.cc b/mindspore-lite/src/litert/delegate/parameter_cache/load_host_cache_model.cc index d3da0e4b..a5b2a5af 100644 --- a/mindspore-lite/src/litert/delegate/parameter_cache/load_host_cache_model.cc +++ b/mindspore-lite/src/litert/delegate/parameter_cache/load_host_cache_model.cc @@ -27,7 +27,7 @@ namespace { constexpr size_t kGatherInputsSize = 3; } -namespace mindspore { +namespace mindspore::lite { namespace cache { HostCacheModel::~HostCacheModel() { if (cache_model_ != nullptr) { @@ -145,4 +145,4 @@ MSTensor HostCacheModel::GetHostCacheTensor(kernel::Kernel *kernel) { return MSTensor(nullptr); } } // namespace cache -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/delegate/parameter_cache/load_host_cache_model.h b/mindspore-lite/src/litert/delegate/parameter_cache/load_host_cache_model.h index 93e52171..a45ab64b 100644 --- a/mindspore-lite/src/litert/delegate/parameter_cache/load_host_cache_model.h +++ b/mindspore-lite/src/litert/delegate/parameter_cache/load_host_cache_model.h @@ -26,7 +26,7 @@ #include "include/api/delegate.h" #include "src/litert/lite_model.h" -namespace mindspore { +namespace mindspore::lite { namespace cache { class HostCacheModel { public: @@ -44,5 +44,5 @@ class HostCacheModel { size_t model_size_; }; } // namespace cache -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_EMBEDDING_CACHE_H_ diff --git a/mindspore-lite/src/litert/inner_allocator.cc b/mindspore-lite/src/litert/inner_allocator.cc index f96d7d65..0886f10e 100644 --- a/mindspore-lite/src/litert/inner_allocator.cc +++ b/mindspore-lite/src/litert/inner_allocator.cc @@ -19,7 +19,7 @@ #include "src/common/log_adapter.h" #include "src/common/utils.h" -namespace mindspore { +namespace mindspore::lite { DefaultAllocator::DefaultAllocator(size_t aligned_size) { aligned_size_ = aligned_size; max_malloc_size_ = lite::GetMaxMallocSize(); @@ -178,4 +178,4 @@ void DefaultAllocator::Clear() { freeList_.clear(); UnLock(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/inner_allocator.h b/mindspore-lite/src/litert/inner_allocator.h index 8c89508c..c87dffee 100644 --- a/mindspore-lite/src/litert/inner_allocator.h +++ b/mindspore-lite/src/litert/inner_allocator.h @@ -27,7 +27,7 @@ #include #include "include/api/allocator.h" -namespace mindspore { +namespace mindspore::lite { struct AllocatorContext { int shiftFactor; bool lockFlag; @@ -71,6 +71,6 @@ class DefaultAllocator : public Allocator { constexpr int64_t MAX_MALLOC_SIZE = static_cast(2000) * 1024 * 1024; constexpr int64_t MAX_THREAD_POOL_SIZE = static_cast(3000) * 1024 * 1024; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_INNER_ALLOCATOR_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/arithmetic_base.cc b/mindspore-lite/src/litert/kernel/cpu/base/arithmetic_base.cc index 7b97dadd..88b2fce5 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/arithmetic_base.cc +++ 
b/mindspore-lite/src/litert/kernel/cpu/base/arithmetic_base.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ArithmeticBaseRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { auto kernel = reinterpret_cast(cdata); CHECK_NULL_RETURN(kernel); @@ -453,4 +453,4 @@ void ArithmeticBaseCPUKernel::ComputeOffset(int task_id) { block_boundary_infos_[task_id].b_offset.push_back(b_offset * b_matric_.inner_size * in_data_size_); } } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/arithmetic_base.h b/mindspore-lite/src/litert/kernel/cpu/base/arithmetic_base.h index 3e3ad63d..b868b7e0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/arithmetic_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/arithmetic_base.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/arithmetic_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ArithmeticBaseCPUKernel : public LiteKernel { public: ArithmeticBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -95,5 +95,5 @@ class ArithmeticBaseCPUKernel : public LiteKernel { std::vector broadcast_buffer_; std::vector block_boundary_infos_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_ARITHMETIC_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/assert.cc b/mindspore-lite/src/litert/kernel/cpu/base/assert.cc index dcac9666..3acda260 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/assert.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/assert.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Assert; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int AssertCPUKernel::Prepare() { CHECK_NOT_EQUAL_RETURN(in_tensors_.size(), 1); CHECK_NOT_EQUAL_RETURN(out_tensors_.size(), 1); @@ -50,4 +50,4 @@ int AssertCPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Assert, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Assert, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Assert, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/assert.h b/mindspore-lite/src/litert/kernel/cpu/base/assert.h index 2a1f48b5..0b9f6f16 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/assert.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/assert.h @@ -19,7 +19,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class AssertCPUKernel : public LiteKernel { public: AssertCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -31,6 +31,6 @@ class AssertCPUKernel : public LiteKernel { int ReSize() override; int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_ASSERT_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/call.cc b/mindspore-lite/src/litert/kernel/cpu/base/call.cc index a894aa19..c4d004d2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/call.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/call.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_OK; using 
mindspore::schema::PrimitiveType_Call; // this file is useless when move create actor before schedule. -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int CallCPUKernel::Prepare() { return RET_OK; } int CallCPUKernel::ReSize() { return RET_OK; } int CallCPUKernel::Run() { return RET_OK; } @@ -34,4 +34,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Call, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Call, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Call, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/call.h b/mindspore-lite/src/litert/kernel/cpu/base/call.h index 81fefd06..b455f64b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/call.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/call.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" // this file is useless when move create actor before schedule. -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CallCPUKernel : public LiteKernel { public: CallCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -32,6 +32,6 @@ class CallCPUKernel : public LiteKernel { int ReSize() override; int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_CALL_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/constant_of_shape.cc b/mindspore-lite/src/litert/kernel/cpu/base/constant_of_shape.cc index e31bc3d6..769fd0f6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/constant_of_shape.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/constant_of_shape.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_ConstantOfShape; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ConstantOfShapeRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { auto g_kernel = reinterpret_cast(cdata); CHECK_NULL_RETURN(g_kernel); @@ -105,4 +105,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_ConstantOfShape, LiteKernelCr REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_ConstantOfShape, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt64, PrimitiveType_ConstantOfShape, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_ConstantOfShape, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/constant_of_shape.h b/mindspore-lite/src/litert/kernel/cpu/base/constant_of_shape.h index b2032cab..1ec0c30e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/constant_of_shape.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/constant_of_shape.h @@ -23,7 +23,7 @@ #include "nnacl_c/fp32/constant_of_shape_fp32.h" #include "nnacl_c/fp16/constant_of_shape_fp16.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConstantOfShapeCPUKernel : public LiteKernel { public: ConstantOfShapeCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -43,6 +43,6 @@ class ConstantOfShapeCPUKernel : public LiteKernel { void *output_ptr_ = nullptr; int thread_stride_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_CONSTANT_OF_SHAPE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/convolution_base.cc 
b/mindspore-lite/src/litert/kernel/cpu/base/convolution_base.cc index 0aedf4b9..dc3ff788 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/convolution_base.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/convolution_base.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; using mindspore::schema::ActivationType; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void *ConvolutionBaseCPUKernel::MallocAlignedData(size_t alignment, size_t size) { MS_CHECK_TRUE_RET(size + alignment < MAX_MALLOC_SIZE, nullptr); auto ptr = malloc(size + alignment); @@ -574,4 +574,4 @@ void *ConvolutionBaseCPUKernel::GetConvPackWeightData(size_t data_size) { } return data; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/convolution_base.h b/mindspore-lite/src/litert/kernel/cpu/base/convolution_base.h index cc69c621..fb156434 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/convolution_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/convolution_base.h @@ -35,7 +35,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionBaseCPUKernel : public LiteKernel { public: ConvolutionBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -95,6 +95,6 @@ class ConvolutionBaseCPUKernel : public LiteKernel { void *origin_bias_; // do not free bool use_batch_cut_flag_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_CONVOLUTION_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/custom_is_inf.cc b/mindspore-lite/src/litert/kernel/cpu/base/custom_is_inf.cc index 44ac568e..fc62faec 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/custom_is_inf.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/custom_is_inf.cc @@ -23,7 +23,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int CustomIsInfCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C1NUM); @@ -58,4 +58,4 @@ int CustomIsInfCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimType_Inner_CustomIsInf, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/custom_is_inf.h b/mindspore-lite/src/litert/kernel/cpu/base/custom_is_inf.h index e63d8ec7..da3457a7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/custom_is_inf.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/custom_is_inf.h @@ -19,7 +19,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CustomIsInfCPUKernel : public LiteKernel { public: CustomIsInfCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -33,6 +33,6 @@ class CustomIsInfCPUKernel : public LiteKernel { private: void LaunchKernelFloat(const float *input, bool *output); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_IS_INF_CPU_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/custom_masked_fill.cc b/mindspore-lite/src/litert/kernel/cpu/base/custom_masked_fill.cc index cb384f0a..2e08c51c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/custom_masked_fill.cc +++ 
b/mindspore-lite/src/litert/kernel/cpu/base/custom_masked_fill.cc @@ -23,7 +23,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int CustomMaskedFillCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C3NUM); @@ -87,4 +87,4 @@ int CustomMaskedFillCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimType_Inner_CustomMaskedFill, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/custom_masked_fill.h b/mindspore-lite/src/litert/kernel/cpu/base/custom_masked_fill.h index 04a2dcab..d4788f69 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/custom_masked_fill.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/custom_masked_fill.h @@ -19,7 +19,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CustomMaskedFillCPUKernel : public LiteKernel { public: CustomMaskedFillCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -30,6 +30,6 @@ class CustomMaskedFillCPUKernel : public LiteKernel { int ReSize() override; int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CUSTOM_MASKED_FILL_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/custom_tensor_scatter.cc b/mindspore-lite/src/litert/kernel/cpu/base/custom_tensor_scatter.cc index be974c84..4f2fdbb9 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/custom_tensor_scatter.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/custom_tensor_scatter.cc @@ -26,7 +26,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { int TensorScatterRun(void *cdata, int task_id, float, float) { auto kernel = static_cast(cdata); @@ -71,4 +71,4 @@ int CustomTensorScatterCPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeFloat32, PrimType_Inner_CustomTensorScatterMax, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/custom_tensor_scatter.h b/mindspore-lite/src/litert/kernel/cpu/base/custom_tensor_scatter.h index e39733c5..a1e56595 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/custom_tensor_scatter.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/custom_tensor_scatter.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/base/scatter_nd_binary.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CustomTensorScatterCPUKernel : public ScatterNDBinaryCPUKernel { public: explicit CustomTensorScatterCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -31,6 +31,6 @@ class CustomTensorScatterCPUKernel : public ScatterNDBinaryCPUKernel { int Run() override; int TensorScatterDispatch(int task_id); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_BASE_TENSOR_SCATTER_ADD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/detection_post_process_base.cc b/mindspore-lite/src/litert/kernel/cpu/base/detection_post_process_base.cc index a13c9c90..26422557 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/detection_post_process_base.cc +++ 
b/mindspore-lite/src/litert/kernel/cpu/base/detection_post_process_base.cc @@ -32,7 +32,7 @@ namespace { const constexpr int kSecondTensorIndex = 2; } -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void PartialArgSort(const float *scores, int *indexes, int num_to_sort, int num_values) { std::partial_sort(indexes, indexes + num_to_sort, indexes + num_values, [&scores](const int i, const int j) { if (std::abs(scores[i] - scores[j]) < FLT_EPSILON) { @@ -285,4 +285,4 @@ int DetectionPostProcessBaseCPUKernel::Run() { FreeAllocatedBuffer(); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/detection_post_process_base.h b/mindspore-lite/src/litert/kernel/cpu/base/detection_post_process_base.h index 41e0458b..4e4d11b2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/detection_post_process_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/detection_post_process_base.h @@ -23,7 +23,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DetectionPostProcessBaseCPUKernel : public LiteKernel { public: DetectionPostProcessBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -49,5 +49,5 @@ class DetectionPostProcessBaseCPUKernel : public LiteKernel { virtual int GetInputData() = 0; int ParamInit(); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_DETECTION_POST_PROCESS_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/format_transpose.cc b/mindspore-lite/src/litert/kernel/cpu/base/format_transpose.cc index 7f77e978..9b593530 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/format_transpose.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/format_transpose.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_FormatTranspose; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int FormatTransposeCPUKernel::Run() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -76,4 +76,4 @@ std::string FormatTransposeCPUKernel::name() const { REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_FormatTranspose, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_FormatTranspose, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/format_transpose.h b/mindspore-lite/src/litert/kernel/cpu/base/format_transpose.h index 1468808d..e00da65f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/format_transpose.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/format_transpose.h @@ -22,7 +22,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/format_transpose_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class FormatTransposeCPUKernel : public LiteKernel { public: FormatTransposeCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -39,5 +39,5 @@ class FormatTransposeCPUKernel : public LiteKernel { private: FormatTransposeParameter *param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_FORMAT_TRANSPOSE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_base.cc 
b/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_base.cc index eda78f6b..096c6e97 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_base.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_base.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int GroupConvolutionBaseCPUKernel::Prepare() { for (int i = 0; i < group_num_; ++i) { auto sub_conv = group_convs_.at(i); @@ -212,4 +212,4 @@ int GroupConvolutionBaseCPUKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_base.h b/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_base.h index e11b85dd..48dbf614 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_base.h @@ -25,7 +25,7 @@ #include "nnacl_c/fp32/conv_common_fp32.h" #include "src/litert/kernel/cpu/base/group_convolution_creator.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class GroupConvolutionBaseCPUKernel : public ConvolutionBaseCPUKernel { public: GroupConvolutionBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -62,6 +62,6 @@ class GroupConvolutionBaseCPUKernel : public ConvolutionBaseCPUKernel { int ori_out_channel_ = 0; int out_thread_num_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_GROUP_CONVOLUTION_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_creator.cc b/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_creator.cc index 25cb776b..55a40f69 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_creator.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_creator.cc @@ -16,7 +16,7 @@ #include "src/litert/kernel/cpu/base/group_convolution_creator.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void CopyTensorQuantParam(lite::Tensor *dst, const lite::Tensor *src) { for (size_t i = 0; i < src->quant_params().size(); i++) { dst->AddQuantParam(src->quant_params().at(i)); @@ -237,4 +237,4 @@ int GroupConvCreator::GetSingleConvParam(ConvParameter *conv_param, std::vector< } return lite::RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_creator.h b/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_creator.h index c362a7ef..5b4ec0a3 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_creator.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/group_convolution_creator.h @@ -24,7 +24,7 @@ #include "src/litert/tensor_category.h" #include "include/api/allocator.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { struct TensorInfo { std::vector shape_; AllocatorPtr allocator_; @@ -82,6 +82,6 @@ class GroupConvCreator { }; ConvParameter *CreateNewConvParameter(const ConvParameter *parameter); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_GROUP_CONVOLUTION_CREATOR_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/layout_transform.cc b/mindspore-lite/src/litert/kernel/cpu/base/layout_transform.cc index 32ffe271..f2e1cf90 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/base/layout_transform.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/layout_transform.cc @@ -18,7 +18,7 @@ #include "src/common/log_adapter.h" #include "schema/ops_types_generated.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { LayoutConvertor LayoutTransformFp32(mindspore::Format src_format, mindspore::Format dst_format) { if (src_format == mindspore::NHWC && dst_format == mindspore::NC4HW4) { return PackNHWCToNC4HW4Fp32; @@ -55,4 +55,4 @@ LayoutConvertor LayoutTransform(TypeId data_type, mindspore::Format src_format, return nullptr; } } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/layout_transform.h b/mindspore-lite/src/litert/kernel/cpu/base/layout_transform.h index db0fb59f..37b147a9 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/layout_transform.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/layout_transform.h @@ -23,7 +23,7 @@ #include "nnacl_c/pack.h" #include "src/tensor.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { typedef void (*LayoutConvertor)(const void *src, void *dst, int batch, int plane, int channel); #ifdef ENABLE_FP16 LayoutConvertor LayoutTransformFp16(mindspore::Format src_format, mindspore::Format dst_format); @@ -34,6 +34,6 @@ LayoutConvertor LayoutTransformFp32(mindspore::Format src_format, mindspore::For LayoutConvertor LayoutTransformInt8(mindspore::Format src_format, mindspore::Format dst_format); LayoutConvertor LayoutTransform(TypeId data_type, mindspore::Format src_format, mindspore::Format dst_format); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_LAYOUT_TRANSFORM_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/partial_fusion.cc b/mindspore-lite/src/litert/kernel/cpu/base/partial_fusion.cc index 76a52612..8255fe0a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/partial_fusion.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/partial_fusion.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_PartialFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int PartialFusionKernel::Prepare() { return RET_OK; } int PartialFusionKernel::ReSize() { return RET_OK; } int PartialFusionKernel::Run() { return RET_OK; } @@ -33,4 +33,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_PartialFusion, LiteKernelCrea REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_PartialFusion, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_PartialFusion, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_PartialFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/partial_fusion.h b/mindspore-lite/src/litert/kernel/cpu/base/partial_fusion.h index 9862204c..8b9bd08c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/partial_fusion.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/partial_fusion.h @@ -21,7 +21,7 @@ #include "src/executor/kernel_exec.h" // this file is going to be removed when move create actor before schedule. 
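// Editor's note, not part of the original patch: the rename applied throughout these hunks
// (namespace mindspore -> mindspore::lite, mindspore::kernel -> mindspore::lite::kernel)
// uses the C++17 nested namespace definition syntax. Below is a minimal, compilable sketch
// of the equivalence, under the assumption of a C++17 toolchain; "Demo" is a hypothetical
// type used only for illustration and does not exist in the repository.
#include <type_traits>

namespace mindspore {
namespace lite {
namespace kernel {
struct Demo {};  // stand-in for a kernel class such as AssertCPUKernel
}  // namespace kernel
}  // namespace lite
}  // namespace mindspore

// The compact C++17 form reopens exactly the same namespace, so fully qualified names
// (e.g. mindspore::lite::kernel::Demo) keep resolving unchanged after the patch.
namespace mindspore::lite::kernel {
static_assert(std::is_same_v<Demo, ::mindspore::lite::kernel::Demo>,
              "both spellings denote the same namespace");
}  // namespace mindspore::lite::kernel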
-namespace mindspore::kernel { +namespace mindspore::lite::kernel { class PartialFusionKernel : public LiteKernel { public: PartialFusionKernel(OpParameter *parameter, const std::vector &inputs, @@ -37,6 +37,6 @@ class PartialFusionKernel : public LiteKernel { // graphs, so use a vector. std::vector subgraph_kernels_{}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_PARTIAL_FUSION_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/quant_dtype_cast.cc b/mindspore-lite/src/litert/kernel/cpu/base/quant_dtype_cast.cc index 812f6dc3..8c30a2d8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/quant_dtype_cast.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/quant_dtype_cast.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::RET_PARAM_INVALID; using mindspore::schema::PrimitiveType_QuantDTypeCast; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int QuantDTypeCastCPUKernel::Prepare() { if (in_tensors_.size() != 1) { MS_LOG(ERROR) << "inputs number should be 1, but " << in_tensors_.size() << " is given."; @@ -319,4 +319,4 @@ int QuantDTypeCastCPUKernel::DoDequanInt8ToFp32ChannelCol(const int8_t *quant_va REG_KERNEL(kCPU, kNumberTypeUInt8, PrimitiveType_QuantDTypeCast, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_QuantDTypeCast, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_QuantDTypeCast, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/quant_dtype_cast.h b/mindspore-lite/src/litert/kernel/cpu/base/quant_dtype_cast.h index 240dc1b5..20f93ade 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/quant_dtype_cast.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/quant_dtype_cast.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class QuantDTypeCastCPUKernel : public LiteKernel { public: QuantDTypeCastCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -72,6 +72,6 @@ class QuantDTypeCastCPUKernel : public LiteKernel { int32_t dst_dtype{0}; int32_t quant_dst_dtype{0}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_QUANT_DTYPE_CAST_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/random_normal.cc b/mindspore-lite/src/litert/kernel/cpu/base/random_normal.cc index 456ee89f..b5e3e170 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/random_normal.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/random_normal.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_RandomNormal; using mindspore::schema::PrimitiveType_RandomStandardNormal; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int RandomNormalCPUKernel::Prepare() { CHECK_NULL_RETURN(param_); return RET_OK; @@ -72,4 +72,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_RandomStandardNormal, LiteKer REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_RandomNormal, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_RandomStandardNormal, LiteKernelCreator) #endif -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/random_normal.h b/mindspore-lite/src/litert/kernel/cpu/base/random_normal.h index 91ba1db0..ef2a96a3 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/base/random_normal.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/random_normal.h @@ -23,7 +23,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class RandomNormalCPUKernel : public LiteKernel { public: RandomNormalCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -40,6 +40,6 @@ class RandomNormalCPUKernel : public LiteKernel { protected: RandomNormalParam *param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_RANDOM_NORMAL_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/reduce_base.cc b/mindspore-lite/src/litert/kernel/cpu/base/reduce_base.cc index 70895774..ab2460a8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/reduce_base.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/reduce_base.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr size_t kInputNum = 1; constexpr size_t kOutputNum = 1; @@ -205,4 +205,4 @@ void ReduceBaseCPUKernel::DecideIfOnlyCopy() { only_copy_ = false; } } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/reduce_base.h b/mindspore-lite/src/litert/kernel/cpu/base/reduce_base.h index 963e8085..3d133413 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/reduce_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/reduce_base.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/reduce_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ReduceBaseCPUKernel : public LiteKernel { public: ReduceBaseCPUKernel(OpParameter *param, const std::vector &inputs, @@ -54,6 +54,6 @@ class ReduceBaseCPUKernel : public LiteKernel { int inner_size_{0}; int axis_size_{0}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_REDUCE_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/resize_base.cc b/mindspore-lite/src/litert/kernel/cpu/base/resize_base.cc index eebc0ec8..a2ee5af4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/resize_base.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/resize_base.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_INVALID_OP_ATTR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kMaxInputNum = 4; constexpr int kOutputNum = 1; @@ -116,4 +116,4 @@ int ResizeBaseCPUKernel::Prepare() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/resize_base.h b/mindspore-lite/src/litert/kernel/cpu/base/resize_base.h index 1f94416e..1c9e8aa8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/resize_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/resize_base.h @@ -23,7 +23,7 @@ using mindspore::schema::PrimitiveType_Resize; using mindspore::schema::ResizeMethod; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ResizeBaseCPUKernel : public LiteKernel { public: ResizeBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -47,6 +47,6 @@ class ResizeBaseCPUKernel : public LiteKernel { int CheckParameters(); 
int CheckInputsOuputs(); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_RESIZE_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_base.cc b/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_base.cc index ea6bfa4f..11ede502 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_base.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_base.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_ScatterNd; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kScatterIndicesIndex = 0; constexpr int kScatterUpdateIndex = 1; @@ -133,4 +133,4 @@ int ScatterNDCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_ScatterNd, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_base.h b/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_base.h index 043481f9..7945e4a6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_base.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/base/scatter_nd_binary.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ScatterNDCPUKernel : public LiteKernel { public: explicit ScatterNDCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -41,6 +41,6 @@ class ScatterNDCPUKernel : public LiteKernel { std::vector output_unit_offsets_; std::vector out_strides_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_SCATTER_ND_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_binary.cc b/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_binary.cc index 0c98fedc..6934040e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_binary.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_binary.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ScatterNDBinaryCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), DIMENSION_3D); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -109,4 +109,4 @@ int ScatterNDBinaryCPUKernel::ReSize() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_binary.h b/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_binary.h index 88972f13..6f44513b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_binary.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/scatter_nd_binary.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/base/scatter_nd_binary.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr int kScatterUpdateInputIndex = 0; constexpr int kScatterIndicesIndex = 1; constexpr int kScatterUpdateIndex = 2; @@ -42,6 +42,6 @@ class ScatterNDBinaryCPUKernel : public LiteKernel { ScatterNDParameter *param_ = nullptr; std::vector output_unit_offsets_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_BASE_SCATTER_ND_BINARY_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/select.cc 
b/mindspore-lite/src/litert/kernel/cpu/base/select.cc index e8b9882c..c2e7258e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/select.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/select.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Select; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr static int kConditionIdx = 0; constexpr static int kFirstIdx = 1; constexpr static int kSecondIdx = 2; @@ -190,4 +190,4 @@ int SelectCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Select, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/select.h b/mindspore-lite/src/litert/kernel/cpu/base/select.h index 44626c09..635c6a57 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/select.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/select.h @@ -20,7 +20,7 @@ #include "src/litert/lite_kernel.h" #include "src/tensorlist.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SelectCPUKernel : public LiteKernel { public: SelectCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -31,6 +31,6 @@ class SelectCPUKernel : public LiteKernel { int ReSize() override; int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_SELECT_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/split_base.cc b/mindspore-lite/src/litert/kernel/cpu/base/split_base.cc index 78b2d552..60c11b03 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/split_base.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/split_base.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Split; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SplitBaseCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -178,4 +178,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Split, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt64, PrimitiveType_Split, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Split, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/split_base.h b/mindspore-lite/src/litert/kernel/cpu/base/split_base.h index 8ec8ce5c..72b73382 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/split_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/split_base.h @@ -23,7 +23,7 @@ #include "nnacl_c/split_parameter.h" #include "nnacl_c/base/split_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SplitBaseCPUKernel : public LiteKernel { public: SplitBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -50,6 +50,6 @@ class SplitBaseCPUKernel : public LiteKernel { void *input_ptr_ = nullptr; std::vector output_ptr_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_SPLIT_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/split_with_over_lap_base.cc b/mindspore-lite/src/litert/kernel/cpu/base/split_with_over_lap_base.cc index 77cf39ce..3ff58b1a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/split_with_over_lap_base.cc +++ 
b/mindspore-lite/src/litert/kernel/cpu/base/split_with_over_lap_base.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SplitWithOverlap; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { const int MIN_NUM_SPLIT = 2; int SplitWithOverlapBaseCPUKernel::CalculateSplitedShapes(const std::vector &shape) { @@ -152,4 +152,4 @@ int SplitWithOverlapBaseCPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SplitWithOverlap, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SplitWithOverlap, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/split_with_over_lap_base.h b/mindspore-lite/src/litert/kernel/cpu/base/split_with_over_lap_base.h index dc674980..4fe9278e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/split_with_over_lap_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/split_with_over_lap_base.h @@ -23,7 +23,7 @@ #include "nnacl_c/split_parameter.h" #include "nnacl_c/base/split_with_over_lap_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SplitWithOverlapBaseCPUKernel : public LiteKernel { public: SplitWithOverlapBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -50,6 +50,6 @@ class SplitWithOverlapBaseCPUKernel : public LiteKernel { char *input_ptr_{nullptr}; std::vector output_ptr_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_SPLIT_WITH_OVER_LAP_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/base/tensor_scatter_add.cc b/mindspore-lite/src/litert/kernel/cpu/base/tensor_scatter_add.cc index 41af720f..29f3bed3 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/tensor_scatter_add.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/tensor_scatter_add.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_TensorScatterAdd; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { int TensorScatterAddRun(void *cdata, int task_id, float, float) { auto kernel = static_cast(cdata); @@ -72,4 +72,4 @@ int TensorScatterAddCPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TensorScatterAdd, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_TensorScatterAdd, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/tensor_scatter_add.h b/mindspore-lite/src/litert/kernel/cpu/base/tensor_scatter_add.h index 11ea39f9..8a70d831 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/tensor_scatter_add.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/tensor_scatter_add.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/base/scatter_nd_binary.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class TensorScatterAddCPUKernel : public ScatterNDBinaryCPUKernel { public: explicit TensorScatterAddCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -31,6 +31,6 @@ class TensorScatterAddCPUKernel : public ScatterNDBinaryCPUKernel { int Run() override; int TensorScatterAdd(int task_id); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_BASE_TENSOR_SCATTER_ADD_H_ diff --git 
a/mindspore-lite/src/litert/kernel/cpu/base/transpose_base.cc b/mindspore-lite/src/litert/kernel/cpu/base/transpose_base.cc index a6b56f07..48bd294a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/transpose_base.cc +++ b/mindspore-lite/src/litert/kernel/cpu/base/transpose_base.cc @@ -19,7 +19,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { const std::vector kPermOpt = {0, 2, 1}; } // namespace @@ -280,4 +280,4 @@ int TransposeBaseCPUKernel::Run() { } return ParallelLaunch(this->ms_context_, TransposeImpl, this, thread_num_); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/base/transpose_base.h b/mindspore-lite/src/litert/kernel/cpu/base/transpose_base.h index d664ab2b..eac9e347 100644 --- a/mindspore-lite/src/litert/kernel/cpu/base/transpose_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/base/transpose_base.h @@ -20,7 +20,7 @@ #include "nnacl_c/transpose_parameter.h" #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class TransposeBaseCPUKernel : public LiteKernel { public: explicit TransposeBaseCPUKernel(OpParameter *param, const std::vector &inputs, @@ -75,6 +75,6 @@ class TransposeBaseCPUKernel : public LiteKernel { // optimized perm std::vector perm_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_TRANSPOSE_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/control/switch.cc b/mindspore-lite/src/litert/kernel/cpu/control/switch.cc index 3efbb6ca..50174e51 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/switch.cc +++ b/mindspore-lite/src/litert/kernel/cpu/control/switch.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Switch; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SwitchCPUKernel::Prepare() { return RET_OK; } int SwitchCPUKernel::ReSize() { return RET_OK; } int SwitchCPUKernel::Run() { return RET_OK; } @@ -31,4 +31,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Switch, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Switch, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Switch, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/control/switch.h b/mindspore-lite/src/litert/kernel/cpu/control/switch.h index 338725ca..3515268f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/switch.h +++ b/mindspore-lite/src/litert/kernel/cpu/control/switch.h @@ -19,7 +19,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SwitchCPUKernel : public LiteKernel { public: SwitchCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -30,6 +30,6 @@ class SwitchCPUKernel : public LiteKernel { int ReSize() override; int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_CONTROL_SWITCH_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/control/switch_layer.cc b/mindspore-lite/src/litert/kernel/cpu/control/switch_layer.cc index 9d41c6ca..106ef7e8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/switch_layer.cc +++ 
b/mindspore-lite/src/litert/kernel/cpu/control/switch_layer.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SwitchLayer; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SwitchLayerCPUKernel::Prepare() { return RET_OK; } int SwitchLayerCPUKernel::ReSize() { return RET_OK; } int SwitchLayerCPUKernel::Run() { return RET_OK; } @@ -31,4 +31,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SwitchLayer, LiteKernelCreato REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SwitchLayer, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_SwitchLayer, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_SwitchLayer, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/control/switch_layer.h b/mindspore-lite/src/litert/kernel/cpu/control/switch_layer.h index c5d2accb..56be98b4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/switch_layer.h +++ b/mindspore-lite/src/litert/kernel/cpu/control/switch_layer.h @@ -19,7 +19,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SwitchLayerCPUKernel : public LiteKernel { public: SwitchLayerCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -30,6 +30,6 @@ class SwitchLayerCPUKernel : public LiteKernel { int ReSize() override; int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_CONTROL_SWITCH_LAYER_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/control/tensor_array.cc b/mindspore-lite/src/litert/kernel/cpu/control/tensor_array.cc index 46d0bd36..2b0c98c1 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/tensor_array.cc +++ b/mindspore-lite/src/litert/kernel/cpu/control/tensor_array.cc @@ -31,7 +31,7 @@ using mindspore::schema::PrimitiveType_TensorArray; using mindspore::schema::PrimitiveType_TensorArrayRead; using mindspore::schema::PrimitiveType_TensorArrayWrite; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr int kTensorArrayReadInSize = 3; constexpr int kTensorArrayWriteInSize = 4; constexpr int kHandleIndex = 0; @@ -166,4 +166,4 @@ int TensorArrayWriteCPUKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/control/tensor_array.h b/mindspore-lite/src/litert/kernel/cpu/control/tensor_array.h index 6413ccdb..eb237340 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/tensor_array.h +++ b/mindspore-lite/src/litert/kernel/cpu/control/tensor_array.h @@ -23,7 +23,7 @@ #include "src/litert/lite_kernel.h" #include "src/tensorlist.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class TensorArrayCPUKernel : public LiteKernel { public: TensorArrayCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -83,6 +83,6 @@ class TensorArrayWriteCPUKernel : public TensorArrayBaseCPUKernel { int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_CONTROL_TENSOR_ARRAY_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_fromtensor.cc b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_fromtensor.cc index 9910c83e..d602f71a 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_fromtensor.cc +++ b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_fromtensor.cc @@ -26,7 +26,7 @@ using mindspore::schema::PrimitiveType_TensorListFromTensor; namespace { constexpr int kNumInputSize = 2; } -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int TensorListFromTensorCPUKernel::IsCompatibleShape() { if (input1_->data_type() != kNumberTypeInt && input1_->data_type() != kNumberTypeInt32) { // element_shape MS_LOG(ERROR) << "in_tensors_[1] data type is must be int"; @@ -122,4 +122,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TensorListFromTensor, REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_TensorListFromTensor, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_TensorListFromTensor, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_fromtensor.h b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_fromtensor.h index 0cbf87fe..4c3f3827 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_fromtensor.h +++ b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_fromtensor.h @@ -23,7 +23,7 @@ #include "schema/model_generated.h" #include "nnacl_c/tensorlist_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class TensorListFromTensorCPUKernel : public LiteKernel { public: TensorListFromTensorCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -44,6 +44,6 @@ class TensorListFromTensorCPUKernel : public LiteKernel { lite::Tensor *input1_ = nullptr; TypeId dtype_ = kTypeUnknown; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_CONTROL_TENSORLIST_FROMTENSOR_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_getitem.cc b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_getitem.cc index 722438ed..66fcd6b9 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_getitem.cc +++ b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_getitem.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_TensorListGetItem; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int TensorListGetItemCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), kInputSize1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -75,4 +75,4 @@ int TensorListGetItemCPUKernel::ReSize() { return RET_OK; } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TensorListGetItem, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_TensorListGetItem, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_TensorListGetItem, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_getitem.h b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_getitem.h index 374c8234..29525dc3 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_getitem.h +++ b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_getitem.h @@ -23,7 +23,7 @@ #include "schema/model_generated.h" #include "nnacl_c/tensorlist_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class TensorListGetItemCPUKernel : public LiteKernel { public: TensorListGetItemCPUKernel(OpParameter *parameter, 
const std::vector &inputs, @@ -41,6 +41,6 @@ class TensorListGetItemCPUKernel : public LiteKernel { int index_ = 0; int dtype_ = kTypeUnknown; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_CONTROL_TENSORLIST_GETITEM_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_reserve.cc b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_reserve.cc index a8e51e6d..8fe3e0df 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_reserve.cc +++ b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_reserve.cc @@ -27,7 +27,7 @@ using mindspore::schema::PrimitiveType_TensorListReserve; namespace { constexpr int kNumInputSize = 2; } -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int TensorListReserveCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), kNumInputSize); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -67,4 +67,4 @@ int TensorListReserveCPUKernel::ReSize() { return RET_OK; } REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_TensorListReserve, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_TensorListReserve, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TensorListReserve, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_reserve.h b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_reserve.h index 4fdb8d92..84fb2eb9 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_reserve.h +++ b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_reserve.h @@ -23,7 +23,7 @@ #include "schema/model_generated.h" #include "nnacl_c/tensorlist_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class TensorListReserveCPUKernel : public LiteKernel { public: TensorListReserveCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -39,6 +39,6 @@ class TensorListReserveCPUKernel : public LiteKernel { private: TypeId element_dtype_ = kTypeUnknown; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_CONTROL_TENSORLIST_RESERVE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_setitem.cc b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_setitem.cc index 7f7a3b61..a2500e2a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_setitem.cc +++ b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_setitem.cc @@ -28,7 +28,7 @@ namespace { constexpr int kNumInputSize = 3; constexpr int kNumInput2 = 2; } // namespace -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int TensorListSetItemCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), kNumInputSize); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -157,4 +157,4 @@ int TensorListSetItemCPUKernel::ReSize() { return RET_OK; } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TensorListSetItem, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_TensorListSetItem, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_TensorListSetItem, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_setitem.h b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_setitem.h index f3f516ba..a27e5b09 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_setitem.h +++ b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_setitem.h @@ -23,7 +23,7 @@ #include "schema/model_generated.h" #include "nnacl_c/tensorlist_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class TensorListSetItemCPUKernel : public LiteKernel { public: TensorListSetItemCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -44,6 +44,6 @@ class TensorListSetItemCPUKernel : public LiteKernel { lite::TensorList *output0_ = nullptr; int index_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_CONTROL_TENSORLIST_SETITEM_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_stack.cc b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_stack.cc index f3e8bcf4..1bfd6685 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_stack.cc +++ b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_stack.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_TensorListStack; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int TensorListStackCPUKernel::CheckParam() { MS_CHECK_TRUE_RET(input0_ != nullptr, RET_NULL_PTR); MS_CHECK_TRUE_RET(output0_ != nullptr, RET_NULL_PTR); @@ -191,4 +191,4 @@ int TensorListStackCPUKernel::ReSize() { return RET_OK; } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TensorListStack, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_TensorListStack, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_TensorListStack, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_stack.h b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_stack.h index 85d3f03f..b2d331f0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_stack.h +++ b/mindspore-lite/src/litert/kernel/cpu/control/tensorlist_stack.h @@ -24,7 +24,7 @@ #include "schema/model_generated.h" #include "nnacl_c/tensorlist_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class TensorListStackCPUKernel : public LiteKernel { public: TensorListStackCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -51,6 +51,6 @@ class TensorListStackCPUKernel : public LiteKernel { lite::Tensor *output0_ = nullptr; std::vector output_shape_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_CONTROL_TENSORLIST_STACK_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/biasadd_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/biasadd_fp16.cc index 78bdbaed..62550850 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/biasadd_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/biasadd_fp16.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_BiasAdd; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int BiasAddCPUFp16Kernel::ReSize() { auto dims = in_tensors_.at(0)->shape(); bias_param_->ndim_ = dims.size(); @@ -149,4 +149,4 @@ int BiasAddCPUFp16Kernel::Eval() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_BiasAdd, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git 
a/mindspore-lite/src/litert/kernel/cpu/fp16/biasadd_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/biasadd_fp16.h index c4b838cd..ad311b90 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/biasadd_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/biasadd_fp16.h @@ -20,7 +20,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/fp16/arithmetic_fp16.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class BiasAddCPUFp16Kernel : public LiteKernel { public: BiasAddCPUFp16Kernel(OpParameter *parameter, const std::vector &inputs, @@ -45,6 +45,6 @@ class BiasAddCPUFp16Kernel : public LiteKernel { TypeId bias_data_type_ = kNumberTypeFloat16; bool is_repack_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_BIASADD_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/cast_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/cast_fp16.cc index e564d6ef..83c7fb59 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/cast_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/cast_fp16.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Cast; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { int CastFp16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) { if (cdata == nullptr) { @@ -145,4 +145,4 @@ int CastFp16CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Cast, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/cast_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/cast_fp16.h index 447daff3..c206e092 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/cast_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/cast_fp16.h @@ -22,7 +22,7 @@ #include "nnacl_c/fp16/cast_fp16.h" #include "nnacl_c/base/cast_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CastFp16CPUKernel : public LiteKernel { public: CastFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -40,6 +40,6 @@ class CastFp16CPUKernel : public LiteKernel { int stride_ = 0; int data_num_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_CAST_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/common_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/common_fp16.cc index 1a1d5dc6..a5f26ac4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/common_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/common_fp16.cc @@ -20,7 +20,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { float16_t *ConvertInputFp32toFp16(lite::Tensor *input, const lite::InnerContext *ctx) { MS_CHECK_TRUE_MSG(input != nullptr, nullptr, "input must be not a nullptr."); float16_t *fp16_data = nullptr; @@ -77,4 +77,4 @@ int ConvertFp32TensorToFp16(lite::Tensor *tensor, const lite::InnerContext *ctx) ctx->allocator->Free(fp32_data); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/common_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/common_fp16.h index cb43da36..866f2380 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/common_fp16.h +++ 
b/mindspore-lite/src/litert/kernel/cpu/fp16/common_fp16.h @@ -19,12 +19,12 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { float16_t *ConvertInputFp32toFp16(lite::Tensor *input, const lite::InnerContext *ctx); float16_t *MallocOutputFp16(lite::Tensor *output, const lite::InnerContext *ctx); int ConvertFp32TensorToFp16(lite::Tensor *tensor, const lite::InnerContext *ctx); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_COMMON_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_1x1_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_1x1_fp16.cc index 493f7fa0..c9591f94 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_1x1_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_1x1_fp16.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int Convolution1x1FP16CPUKernel::InitMatmulParam() { matmul_param_->row_ = conv_param_->output_h_ * conv_param_->output_w_; matmul_param_->col_ = conv_param_->output_channel_; @@ -334,4 +334,4 @@ int Convolution1x1FP16CPUKernel::Run() { pack_input_ = nullptr; return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_1x1_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_1x1_fp16.h index 640e789a..e79cf5b7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_1x1_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_1x1_fp16.h @@ -25,7 +25,7 @@ #include "nnacl_c/matmul_parameter.h" #include "nnacl_c/fp16/matmul_fp16.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class Convolution1x1FP16CPUKernel : public ConvolutionBaseCPUKernel { public: Convolution1x1FP16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -61,6 +61,6 @@ class Convolution1x1FP16CPUKernel : public ConvolutionBaseCPUKernel { int col_tile_; int row_tile_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_CONVOLUTION_1X1_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_delegate_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_delegate_fp16.cc index 53d04f42..9693e7e2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_delegate_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_delegate_fp16.cc @@ -35,7 +35,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Conv2DFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionDelegateFP16CPUKernel::FreeCopiedData() { if ((origin_weight_ != nullptr) && (need_free_ & WEIGHT_NEED_FREE)) { free(origin_weight_); @@ -245,4 +245,4 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector & } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Conv2DFusion, CpuConvFp16KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_delegate_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_delegate_fp16.h index 94c294d3..77e1b750 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_delegate_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_delegate_fp16.h @@ -25,7 +25,7 @@ #define WEIGHT_NEED_FREE 0001 #define BIAS_NEED_FREE 0010 -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDelegateFP16CPUKernel : public LiteKernel { public: ConvolutionDelegateFP16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -100,6 +100,6 @@ class ConvolutionDelegateFP16CPUKernel : public LiteKernel { void *origin_bias_ = nullptr; kernel::LiteKernel *fp16_conv_kernel_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_CONVOLUTION_DELEGATE_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_3x3_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_3x3_fp16.cc index 88e9f720..fe48ef90 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_3x3_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_3x3_fp16.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionDepthwise3x3Fp16CPUKernel::PackWeight() { auto weight_tensor = in_tensors_.at(kWeightIndex); int channel = weight_tensor->Batch(); @@ -148,5 +148,5 @@ int ConvolutionDepthwise3x3Fp16CPUKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_3x3_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_3x3_fp16.h index 28a8714a..ec6b583e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_3x3_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_3x3_fp16.h @@ -23,7 +23,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl_c/fp32/conv_depthwise_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDepthwise3x3Fp16CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwise3x3Fp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -45,6 +45,6 @@ class ConvolutionDepthwise3x3Fp16CPUKernel : public ConvolutionBaseCPUKernel { float16_t *output_ptr_ = nullptr; float16_t *buffer_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_CONVOLUTION_DEPTHWISE_3X3_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_fp16.cc index b38c694a..ae5bcf44 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_fp16.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionDepthwiseFp16CPUKernel::PackWeight() { auto weight_tensor = in_tensors_.at(kWeightIndex); void *origin_weight = (op_parameter_->is_train_session_) ? 
weight_tensor->data() : origin_weight_; @@ -126,4 +126,4 @@ int ConvolutionDepthwiseFp16CPUKernel::Run() { } return ret; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_fp16.h index bc8ded4b..959fe355 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_fp16.h @@ -31,7 +31,7 @@ void ConvDwFp16(float16_t *output_data, const float16_t *input_data, const float } #endif -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -50,6 +50,6 @@ class ConvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseCPUKernel { void PackWeight() override; int MallocWeightBiasData() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_CONVOLUTION_DEPTHWISE_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_slidewindow_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_slidewindow_fp16.cc index 50edd50d..d80d057e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_slidewindow_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_slidewindow_fp16.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { ConvolutionDepthwiseSWFp16CPUKernel::~ConvolutionDepthwiseSWFp16CPUKernel() { if (sliding_ != nullptr) { delete sliding_; @@ -196,4 +196,4 @@ void ConvolutionDepthwiseSWFp16CPUKernel::FreePackedInputOutput() { packed_output_ = nullptr; } } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_slidewindow_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_slidewindow_fp16.h index a535bedf..c45738c7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_slidewindow_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_depthwise_slidewindow_fp16.h @@ -32,7 +32,7 @@ void ConvDwC8Fp16(float16_t *output_data, const float16_t *input_data, const flo } #endif -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDepthwiseSWFp16CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseSWFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -57,6 +57,6 @@ class ConvolutionDepthwiseSWFp16CPUKernel : public ConvolutionBaseCPUKernel { float16_t *packed_output_ = nullptr; bool need_align_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_CONVOLUTION_DEPTHWISE_SLIDEWINDOW_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_fp16.cc index a8d04577..3460f216 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_fp16.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel 
{ +namespace mindspore::lite::kernel { void ConvolutionFP16CPUKernel::PackWeight() { auto filter_tensor = in_tensors_.at(kWeightIndex); int in_channel = filter_tensor->Channel(); @@ -190,4 +190,4 @@ int ConvolutionFP16CPUKernel::Run() { FreeTmpBuffer(); return ret; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_fp16.h index 08afcf51..15af5e0d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_fp16.h @@ -22,7 +22,7 @@ #include "src/litert/lite_kernel.h" #include "src/litert/kernel/cpu/base/convolution_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionFP16CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionFP16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -56,6 +56,6 @@ class ConvolutionFP16CPUKernel : public ConvolutionBaseCPUKernel { int col_tile_; int row_tile_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_CONVOLUTION_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_winograd_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_winograd_fp16.cc index 569c5a86..8dfe27d2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_winograd_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_winograd_fp16.cc @@ -19,7 +19,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ConvolutionWinogradFP16CPUKernel::WinogradFilterTransformFp16(const float16_t *weight_data, const float *matrix_g, const float *matrix_gt, int oc_block) { if (oc_block == 0) { @@ -272,4 +272,4 @@ int ConvolutionWinogradFP16CPUKernel::Run() { FreeTmpBuffer(); return ret; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_winograd_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_winograd_fp16.h index c12cf980..b57bf2f9 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_winograd_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/convolution_winograd_fp16.h @@ -26,7 +26,7 @@ #include "src/common/utils.h" #include "nnacl_c/base/minimal_filtering_generator.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionWinogradFP16CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionWinogradFP16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -86,6 +86,6 @@ class ConvolutionWinogradFP16CPUKernel : public ConvolutionBaseCPUKernel { int col_tile_ = 0; int row_tile_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_CONVOLUTION_WINOGRAD_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/custom_gru_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/custom_gru_fp16.cc index 4dda8d7a..ed78774c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/custom_gru_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/custom_gru_fp16.cc @@ -30,7 +30,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NOT_SUPPORT; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int 
CustomGruFp16CPUKernel::InitWeightAndBias() { auto weight_shape = in_tensors_[1]->shape(); auto hidden_size = weight_shape[0] / C3NUM; @@ -129,5 +129,5 @@ int CustomGruFp16CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimType_Inner_CustomGru, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/custom_gru_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/custom_gru_fp16.h index 6ff3fd4b..81debf2d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/custom_gru_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/custom_gru_fp16.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp32/custom_gru_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CustomGruFp16CPUKernel : public CustomGruCPUKernel { public: CustomGruFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -35,6 +35,6 @@ class CustomGruFp16CPUKernel : public CustomGruCPUKernel { protected: int InitWeightAndBias() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_FP16_CUSTOM_GRU_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_depthwise_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_depthwise_fp16.cc index 9059260b..cdee7532 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_depthwise_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_depthwise_fp16.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { DeconvolutionDepthwiseFp16CPUKernel::~DeconvolutionDepthwiseFp16CPUKernel() { if (sliding_ != nullptr) { delete sliding_; @@ -227,4 +227,4 @@ void DeconvolutionDepthwiseFp16CPUKernel::FreePackedInputOutput() { packed_output_ = nullptr; } } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_depthwise_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_depthwise_fp16.h index ea28cb1c..575241a1 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_depthwise_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_depthwise_fp16.h @@ -33,7 +33,7 @@ void ComputeStrides(int *shape, int *strides, int ndim); } #endif -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DeconvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseCPUKernel { public: DeconvolutionDepthwiseFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -59,6 +59,6 @@ class DeconvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseCPUKernel { float16_t *packed_output_ = nullptr; bool need_align_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_DECONVOLUTION_DEPTHWISE_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_fp16.cc index b764ac88..89925122 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_fp16.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Conv2dTransposeFusion; -namespace mindspore::kernel { +namespace 
mindspore::lite::kernel { DeConvolutionFp16CPUKernel::~DeConvolutionFp16CPUKernel() { if (matmul_param_ != nullptr) { delete matmul_param_; @@ -348,4 +348,4 @@ kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Conv2dTransposeFusion, CpuDeConvFp16KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_fp16.h index 29666797..29ab52ac 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_fp16.h @@ -23,7 +23,7 @@ #include "src/litert/kernel_registry.h" #include "src/litert/kernel/cpu/base/convolution_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DeConvolutionFp16CPUKernel : public ConvolutionBaseCPUKernel { public: DeConvolutionFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -59,5 +59,5 @@ class DeConvolutionFp16CPUKernel : public ConvolutionBaseCPUKernel { float16_t *batch_input_ = nullptr; float16_t *batch_output_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_DECONVOLUTION_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_winograd_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_winograd_fp16.cc index 04c16cef..36eca55c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_winograd_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_winograd_fp16.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { DeConvWinogradFp16CPUKernel::~DeConvWinogradFp16CPUKernel() { FreeResizeBuf(); FreeDeconvParam(); @@ -501,4 +501,4 @@ int DeConvWinogradFp16CPUKernel::Run() { FreeRunBuf(); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_winograd_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_winograd_fp16.h index 6e276d3f..8219445d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_winograd_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/deconvolution_winograd_fp16.h @@ -24,7 +24,7 @@ #include "nnacl_c/fp16/pack_fp16.h" #include "src/litert/kernel/cpu/base/convolution_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DeConvWinogradFp16CPUKernel : public ConvolutionBaseCPUKernel { public: DeConvWinogradFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -61,5 +61,5 @@ class DeConvWinogradFp16CPUKernel : public ConvolutionBaseCPUKernel { int thread_stride_hw_ = 0; bool valid_weight_shape_ = true; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_DECONVOLUTION_WINOGRAD_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/dynamic_quant_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/dynamic_quant_fp16.cc index 0722c9d6..1835eacf 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/dynamic_quant_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/dynamic_quant_fp16.cc @@ -30,7 +30,7 @@ using mindspore::lite::RET_NULL_PTR; using 
mindspore::lite::RET_OK; using mindspore::lite::RET_PARAM_INVALID; using mindspore::schema::PrimitiveType_DynamicQuant; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kBucketNums = 8; constexpr int k8Bit = 8; @@ -223,4 +223,4 @@ kernel::LiteKernel *DynamicQuantFp16CPUCreator(const std::vector } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_DynamicQuant, DynamicQuantFp16CPUCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/dynamic_quant_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/dynamic_quant_fp16.h index 8ceb33b8..d553e0cb 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/dynamic_quant_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/dynamic_quant_fp16.h @@ -21,7 +21,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DynamicQuantFp16CPUKernel : public LiteKernel { public: DynamicQuantFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -53,6 +53,6 @@ class DynamicQuantFp16CPUKernel : public LiteKernel { int32_t dst_dtype_{0}; bool symmetric_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_DYNAMIC_QUANT_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/fullconnection_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/fullconnection_fp16.cc index bb925bf6..70f452de 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/fullconnection_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/fullconnection_fp16.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_FullConnection; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int FullconnectionFP16CPUKernel::InitAShape() { auto a_shape = in_tensors_.at(0)->shape(); MS_CHECK_TRUE_MSG(a_shape.size(), C2NUM, "fully-connection A-metrics' shape is invalid."); @@ -80,4 +80,4 @@ int FullconnectionFP16CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_FullConnection, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/fullconnection_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/fullconnection_fp16.h index 79560d7a..68c43ad0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/fullconnection_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/fullconnection_fp16.h @@ -21,7 +21,7 @@ #include #include "src/litert/kernel/cpu/fp16/matmul_base_fp16.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class FullconnectionFP16CPUKernel : public MatmulBaseFP16CPUKernel { public: explicit FullconnectionFP16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -37,6 +37,6 @@ class FullconnectionFP16CPUKernel : public MatmulBaseFP16CPUKernel { int InitAShape() override; int InitBShape() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_FULLCONNECTION_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/group_convolution_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/group_convolution_fp16.cc index 7d431744..9dca7c9a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/group_convolution_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/group_convolution_fp16.cc @@ -20,7 
+20,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int GroupConvolutionFP16CPUKernel::Separate(int task_id) { auto plane_step = UP_DIV(in_plane_, in_thread_num_); auto begin_plane = plane_step * task_id; @@ -156,4 +156,4 @@ int GroupConvolutionFP16CPUKernel::Prepare() { } return GroupConvolutionBaseCPUKernel::Prepare(); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/group_convolution_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/group_convolution_fp16.h index 1441abc4..9398b9d4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/group_convolution_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/group_convolution_fp16.h @@ -24,7 +24,7 @@ #include "src/litert/kernel/cpu/base/group_convolution_base.h" #include "nnacl_c/fp16/conv_fp16.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class GroupConvolutionFP16CPUKernel : public GroupConvolutionBaseCPUKernel { public: GroupConvolutionFP16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -48,6 +48,6 @@ class GroupConvolutionFP16CPUKernel : public GroupConvolutionBaseCPUKernel { float16_t *sub_out_src_ = nullptr; float16_t *sub_out_dst_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GROUP_CONVOLUTION_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/gru_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/gru_fp16.cc index 5bdbdd9f..28ce0ef0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/gru_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/gru_fp16.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_GRU; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void GruFp16CPUKernel::FreeTmpBuffer() { if (weight_g_ptr_ != nullptr) { free(weight_g_ptr_); @@ -302,4 +302,4 @@ int GruFp16CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_GRU, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/gru_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/gru_fp16.h index c5bf5dac..ea289028 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/gru_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/gru_fp16.h @@ -19,7 +19,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/gru_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class GruFp16CPUKernel : public LiteKernel { public: GruFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -58,6 +58,6 @@ class GruFp16CPUKernel : public LiteKernel { bool is_vec_ = false; GruParameter *gru_param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRU_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/instance_norm_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/instance_norm_fp16.cc index 9dc1d9ca..aceb6284 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/instance_norm_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/instance_norm_fp16.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_InstanceNorm; -namespace mindspore::kernel { 
+namespace mindspore::lite::kernel { void InstanceNormFp16CPUKernel::FreeTmpBuffer() { if (in_tensors_[1]->data_type() == kNumberTypeFloat32) { if (gamma_data_ != nullptr) { @@ -121,4 +121,4 @@ int InstanceNormFp16CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_InstanceNorm, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/instance_norm_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/instance_norm_fp16.h index 31009de6..c8c96d5f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/instance_norm_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/instance_norm_fp16.h @@ -21,7 +21,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class InstanceNormFp16CPUKernel : public LiteKernel { public: InstanceNormFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -52,6 +52,6 @@ class InstanceNormFp16CPUKernel : public LiteKernel { float16_t *beta_data_ = nullptr; bool input_pack_to_nc8hw8_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_INSTANCE_NORM_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/layout_transform_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/layout_transform_fp16.cc index 361a0870..9cd406ad 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/layout_transform_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/layout_transform_fp16.cc @@ -18,7 +18,7 @@ #include "src/common/log_adapter.h" #include "schema/ops_types_generated.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { LayoutConvertor LayoutTransformFp16(mindspore::Format src_format, mindspore::Format dst_format) { if (src_format == mindspore::NHWC && dst_format == mindspore::NC4HW4) { return PackNHWCToNC4HW4Fp16; @@ -36,4 +36,4 @@ LayoutConvertor LayoutTransformFp16(mindspore::Format src_format, mindspore::For return nullptr; } } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/layout_transform_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/layout_transform_fp16.h index 821fd704..51847ddf 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/layout_transform_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/layout_transform_fp16.h @@ -19,8 +19,8 @@ #include "src/litert/kernel/cpu/base/layout_transform.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { LayoutConvertor LayoutTransformFp16(mindspore::Format src_format, mindspore::Format dst_format); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_LAYOUT_TRANSFORM_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_fp16.cc index a91cb249..0e31f3e7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_fp16.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_LSTM; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr size_t kMindirInputTensorNum = 4; } // namespace @@ -55,4 +55,4 @@ LiteKernel *LstmFp16KernelCreator(const std::vector &inputs, con } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LSTM, 
LstmFp16KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_fp16_base.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_fp16_base.cc index 2d82ff32..17fe70d5 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_fp16_base.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_fp16_base.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kGateNum = 4; constexpr int kTempInputBufferIndex = 0; @@ -391,4 +391,4 @@ void LstmFp16BaseCPUKernel::FreeRunBuffer() { } } } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_fp16_base.h b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_fp16_base.h index f68ac600..d4264688 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_fp16_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_fp16_base.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/lstm_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class LstmFp16BaseCPUKernel : public LiteKernel { public: LstmFp16BaseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -68,6 +68,6 @@ class LstmFp16BaseCPUKernel : public LiteKernel { void FreeRunBuffer(); int MallocRunBuffer(); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_FP16_LSTM_FP16_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_mindir_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_mindir_fp16.cc index 4977df7b..7ec4b97a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_mindir_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_mindir_fp16.cc @@ -17,7 +17,7 @@ #include "src/litert/kernel/cpu/fp16/lstm_mindir_fp16.h" #include "nnacl_c/fp16/lstm_fp16.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr size_t kMindirInputTensorNum = 4; constexpr int kWeightsIndex = 3; @@ -155,4 +155,4 @@ int LstmMindirFp16CPUKernel::InitProjectWeight() { (void)memset(project_bias_, 0, bias_size); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_mindir_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_mindir_fp16.h index 5cae076a..d0725d7b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_mindir_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_mindir_fp16.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp16/lstm_fp16_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { /* * 1. 
LSTM without project, output_size = hidden_size * h_init: second input, shape is [bidirectional, batch_size, hidden_size] @@ -55,6 +55,6 @@ class LstmMindirFp16CPUKernel : public LstmFp16BaseCPUKernel { private: bool gpu_orig_state_{false}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_FP16_LSTM_MINDIR_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_non_mindir_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_non_mindir_fp16.cc index cf7a32e4..4496a0b0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_non_mindir_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_non_mindir_fp16.cc @@ -20,7 +20,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kGateNum = 4; constexpr size_t kInputTensorNumMin = 6; @@ -128,4 +128,4 @@ int LstmNonMindirFp16CPUKernel::InitProjectWeight() { (void)memset(project_bias_, 0, bias_size); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_non_mindir_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_non_mindir_fp16.h index 0202fa7f..df4e1180 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_non_mindir_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/lstm_non_mindir_fp16.h @@ -19,7 +19,7 @@ #include #include "src/litert/kernel/cpu/fp16/lstm_fp16_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { /* * 1. LSTM without project, output_size = hidden_size * weight_ih: second input, shape is [bidirectional, 4 * hidden_size, input_size] @@ -55,6 +55,6 @@ class LstmNonMindirFp16CPUKernel : public LstmFp16BaseCPUKernel { int InitStateWeightBias() override; int InitProjectWeight() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_FP16_LSTM_NON_MINDIR_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_base_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_base_fp16.cc index 9f01f0f5..7ca800f4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_base_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_base_fp16.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_INPUT_TENSOR_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int MatmulBaseFP16Run(void *cdata, int task_id, float lhs_scale, float rhs_scale) { CHECK_NULL_RETURN(cdata); auto op = reinterpret_cast(cdata); @@ -387,4 +387,4 @@ int MatmulBaseFP16CPUKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_base_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_base_fp16.h index 3a467f34..aae5c3d8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_base_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_base_fp16.h @@ -25,7 +25,7 @@ #include "src/common/common.h" #include "nnacl_c/matmul_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MatmulBaseFP16CPUKernel : public LiteKernel { public: explicit MatmulBaseFP16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -79,6 +79,6 @@ class MatmulBaseFP16CPUKernel : public LiteKernel { 
float16_t *batch_b_ptr_ = nullptr; float16_t *batch_c_ptr_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_MATMUL_BASE_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_fp16.cc index 70650f2b..e6d00914 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_fp16.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_MatMulFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int MatmulFP16CPUKernel::InitAShape() { auto a_shape = in_tensors_[0]->shape(); MS_CHECK_TRUE_MSG(a_shape.size() >= DIMENSION_2D, RET_ERROR, "A-metric tensor's shape is invalid."); @@ -108,4 +108,4 @@ int MatmulFP16CPUKernel::Eval() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_MatMulFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_fp16.h index d99d8d2c..053fbea4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/matmul_fp16.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp16/matmul_base_fp16.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MatmulFP16CPUKernel : public MatmulBaseFP16CPUKernel { public: explicit MatmulFP16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -36,6 +36,6 @@ class MatmulFP16CPUKernel : public MatmulBaseFP16CPUKernel { int InitAShape() override; int InitBShape() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_MATMUL_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/quant_dtype_cast_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/quant_dtype_cast_fp16.cc index 4841a3bb..aa6755cb 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/quant_dtype_cast_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/quant_dtype_cast_fp16.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::RET_PARAM_INVALID; using mindspore::schema::PrimitiveType_QuantDTypeCast; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int QuantDTypeCastFp16CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -226,4 +226,4 @@ kernel::LiteKernel *CpuQuantDTypeCastFp16KernelCreator(const std::vector #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class QuantDTypeCastFp16CPUKernel : public LiteKernel { public: QuantDTypeCastFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -44,6 +44,6 @@ class QuantDTypeCastFp16CPUKernel : public LiteKernel { bool int_to_float_ = false; bool is_uint8_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_QUANT_DTYPE_CAST_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/resize_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16/resize_fp16.cc index 2f26862c..25a8d053 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/resize_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/resize_fp16.cc @@ 
-30,7 +30,7 @@ using mindspore::schema::CoordinateTransformMode_ASYMMETRIC; using mindspore::schema::CoordinateTransformMode_HALF_PIXEL; using mindspore::schema::PrimitiveType_Resize; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ResizeFp16CPUKernel::ResizePrepare() { CHECK_NULL_RETURN(in_tensors_.front()); CHECK_NULL_RETURN(out_tensors_.front()); @@ -92,4 +92,4 @@ int ResizeFp16CPUKernel::RunImpl(int task_id) { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Resize, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16/resize_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16/resize_fp16.h index faf0944b..c07036a1 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16/resize_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16/resize_fp16.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/cpu/fp32/resize_fp32.h" #include "nnacl_c/fp16/resize_fp16.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ResizeFp16CPUKernel : public ResizeCPUKernel { public: ResizeFp16CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -35,6 +35,6 @@ class ResizeFp16CPUKernel : public ResizeCPUKernel { int DataTypeLen() override; int RunImpl(int task_id) override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_RESIZE_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/activation_fp16_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/activation_fp16_grad.cc index f6d1a1d2..a8cb3e5f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/activation_fp16_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/activation_fp16_grad.cc @@ -28,7 +28,7 @@ using mindspore::schema::ActivationType_RELU; using mindspore::schema::ActivationType_SIGMOID; using mindspore::schema::PrimitiveType_ActivationGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ActivationGradCPUKernelFp16::Prepare() { if (in_tensors_.size() != C2NUM) { MS_LOG(ERROR) << "ActivationGrad should have 2 input tensors"; @@ -112,4 +112,4 @@ int ActivationGradCPUKernelFp16::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_ActivationGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/activation_fp16_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/activation_fp16_grad.h index ee88b288..2eacf41b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/activation_fp16_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/activation_fp16_grad.h @@ -22,7 +22,7 @@ #include "nnacl_c/fp16_grad/activation_grad_fp16.h" #include "nnacl_c/fp32_grad/activation_grad_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ActivationGradCPUKernelFp16 : public LiteKernel { public: explicit ActivationGradCPUKernelFp16(OpParameter *param, const std::vector &inputs, @@ -41,6 +41,6 @@ class ActivationGradCPUKernelFp16 : public LiteKernel { ActivationGradParameter *param_act_grad_; int thread_count_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_ACTIVATION_FP16_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_grad.cc index 
064eb5f3..b28ed5d7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_grad.cc @@ -25,7 +25,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr static int kX1Idx = 0; constexpr static int kX2Idx = 1; constexpr static int kDyIdx = 2; @@ -108,4 +108,4 @@ int ArithmeticGradCPUKernelFp16::Run() { REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_MaximumGrad, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_MinimumGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_grad.h index 20c45ceb..84ea8b01 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_grad.h @@ -29,7 +29,7 @@ using mindspore::schema::PrimitiveType_MinimumGrad; using mindspore::schema::PrimitiveType_MulGrad; using mindspore::schema::PrimitiveType_SubGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ArithmeticGradCPUKernelFp16; @@ -74,6 +74,6 @@ class ArithmeticGradCPUKernelFp16 : public LiteKernel { float16_t *tile_data1; float16_t *tile_data2; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_ARITHMETIC_FP16_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.cc index 382ca27c..8ccf2e3c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_LogGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ArithmeticSelfGradFp16CPUKernel::Prepare() { if (in_tensors_.size() != C2NUM) { MS_LOG(ERROR) << "ActivationGrad should have 2 input tensors"; @@ -88,4 +88,4 @@ int ArithmeticSelfGradFp16CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LogGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.h index ac5e6e73..2450706c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/arithmetic_fp16_self_grad.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/fp16_grad/arithmetic_self_grad.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ArithmeticSelfGradFp16CPUKernel : public LiteKernel { public: explicit ArithmeticSelfGradFp16CPUKernel(OpParameter *param, const std::vector &inputs, @@ -40,6 +40,6 @@ class ArithmeticSelfGradFp16CPUKernel : public LiteKernel { ArithmeticSelfGradParameterFp16 *param_act_grad_; int thread_count_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // 
MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_ARITHMETIC_FP16_SELF_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bias_fp16_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bias_fp16_grad.cc index a94117b3..7d808ef9 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bias_fp16_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bias_fp16_grad.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_BiasAddGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr static int kMaxDim = 4; int BiasGradCPUKernelFp16::ReSize() { @@ -100,4 +100,4 @@ int BiasGradCPUKernelFp16::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_BiasAddGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bias_fp16_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bias_fp16_grad.h index 24c72273..b41aaede 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bias_fp16_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bias_fp16_grad.h @@ -21,7 +21,7 @@ #include "src/executor/kernel_exec.h" #include "nnacl_c/fp16/arithmetic_fp16.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class BiasGradCPUKernelFp16 : public LiteKernel { public: explicit BiasGradCPUKernelFp16(OpParameter *parameter, const std::vector &inputs, @@ -39,6 +39,6 @@ class BiasGradCPUKernelFp16 : public LiteKernel { private: ArithmeticParameter *bias_param; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_BIAS_FP16_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bn_fp16_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bn_fp16_grad.cc index b8817f9e..c6586aa4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bn_fp16_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bn_fp16_grad.cc @@ -33,7 +33,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_BatchNormGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kNumInputDim_0 = 0; constexpr int kNumInputDim_1 = 1; @@ -191,4 +191,4 @@ int BNGradCPUKernelFp16::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_BatchNormGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bn_fp16_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bn_fp16_grad.h index 6b931821..2f3ad3c8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bn_fp16_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/bn_fp16_grad.h @@ -21,7 +21,7 @@ #include "src/executor/kernel_exec.h" #include "nnacl_c/fp32_grad/batch_norm_grad.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class BNGradCPUKernelFp16 : public LiteKernel { public: @@ -39,5 +39,5 @@ class BNGradCPUKernelFp16 : public LiteKernel { int stage_ = 0; size_t ws_size_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_BN_FP16_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_filter.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_filter.cc index 
29b3f479..5dbf2d58 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_filter.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_filter.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Conv2DBackpropFilterFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ConvolutionGradFilterCPUKernelFp16::ReSize() { // dy is in input 0 // x is in input 1 @@ -214,4 +214,4 @@ int ConvolutionGradFilterCPUKernelFp16::Run() { REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Conv2DBackpropFilterFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_filter.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_filter.h index bd1e4912..281fb89d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_filter.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_filter.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionGradFilterCPUKernelFp16 : public LiteKernel { public: explicit ConvolutionGradFilterCPUKernelFp16(OpParameter *parameter, const std::vector &inputs, @@ -46,6 +46,6 @@ class ConvolutionGradFilterCPUKernelFp16 : public LiteKernel { const int chunk_ = C32NUM; #endif }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_CONVOLUTION_FP16_GRAD_FILTER_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_input.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_input.cc index 80c65cb7..f57ec02e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_input.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_input.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Conv2DBackpropInputFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ConvolutionGradInputCPUKernelFp16::ReSize() { auto *dy_tensor = in_tensors_.at(kInputIndex); MS_ASSERT(dy_tensor != nullptr); @@ -191,4 +191,4 @@ int ConvolutionGradInputCPUKernelFp16::Run() { REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Conv2DBackpropInputFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_input.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_input.h index 99e1f68d..4fa6c9ee 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_input.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/convolution_fp16_grad_input.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionGradInputCPUKernelFp16 : public LiteKernel { public: explicit ConvolutionGradInputCPUKernelFp16(OpParameter *parameter, const std::vector &inputs, @@ -40,6 +40,6 @@ class ConvolutionGradInputCPUKernelFp16 : public LiteKernel { bool do_dw_fp16_ = false; const int chunk_ = C16NUM; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // 
MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_CONVOLUTION_FP16_GRAD_INPUT_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/dropout_fp16_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/dropout_fp16_grad.cc index 86ad09d5..edbce653 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/dropout_fp16_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/dropout_fp16_grad.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_DropoutGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int DropoutGradCPUKernelFp16::Prepare() { CHECK_NULL_RETURN(op_parameter_); auto param = reinterpret_cast(op_parameter_); @@ -99,4 +99,4 @@ int DropoutGradCPUKernelFp16::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_DropoutGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/dropout_fp16_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/dropout_fp16_grad.h index cac1e92c..0d7c56cc 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/dropout_fp16_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/dropout_fp16_grad.h @@ -19,7 +19,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DropoutGradCPUKernelFp16 : public LiteKernel { public: DropoutGradCPUKernelFp16(OpParameter *parameter, const std::vector &inputs, @@ -38,6 +38,6 @@ class DropoutGradCPUKernelFp16 : public LiteKernel { int thread_count_ = 1; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_DROPOUT_FP16_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/layernorm_fp16_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/layernorm_fp16_grad.cc index a77b764a..91908fbd 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/layernorm_fp16_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/layernorm_fp16_grad.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_LayerNormGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kNumInputDim_0 = 0; constexpr int kNumInputDim_1 = 1; @@ -135,4 +135,4 @@ int LayerNormGradCPUKernelFp16::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LayerNormGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/layernorm_fp16_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/layernorm_fp16_grad.h index bc4622c8..ca2ceba6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/layernorm_fp16_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/layernorm_fp16_grad.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class LayerNormGradCPUKernelFp16 : public LiteKernel { public: @@ -39,5 +39,5 @@ class LayerNormGradCPUKernelFp16 : public LiteKernel { int param_num_ = 1; int param_size_ = 1; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_LAYERNORM_FP16_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/neg_fp16_grad.cc 
b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/neg_fp16_grad.cc index 10cf6d17..62b77c25 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/neg_fp16_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/neg_fp16_grad.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_NegGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { int NegGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { CHECK_NULL_RETURN(cdata); @@ -72,4 +72,4 @@ int NegGradCPUKernelFp16::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_NegGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/neg_fp16_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/neg_fp16_grad.h index 189d952a..40ba094d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/neg_fp16_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/neg_fp16_grad.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "schema/model_generated.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class NegGradCPUKernelFp16 : public LiteKernel { public: @@ -37,6 +37,6 @@ class NegGradCPUKernelFp16 : public LiteKernel { private: int thread_count_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_NEG_FP16_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/pooling_fp16_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/pooling_fp16_grad.cc index 2dc0d177..e6da2677 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/pooling_fp16_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/pooling_fp16_grad.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_AvgPoolGrad; using mindspore::schema::PrimitiveType_MaxPoolGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kNumInputDim_2 = 2; constexpr int kNumShapeDim_2 = 2; @@ -127,4 +127,4 @@ int PoolingGradCPUKernelFp16::Run() { REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_AvgPoolGrad, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_MaxPoolGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/pooling_fp16_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/pooling_fp16_grad.h index 10bf78d7..3eeda079 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/pooling_fp16_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/pooling_fp16_grad.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/kernel/pooling.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { using mindspore::schema::PadMode; using mindspore::schema::PoolMode; using mindspore::schema::QuantType; @@ -44,6 +44,6 @@ class PoolingGradCPUKernelFp16 : public LiteKernel { PoolingComputeParam compute_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_POOLING_FP16_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/resize_fp16_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/resize_fp16_grad.cc index 57064d04..2f63cc57 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/resize_fp16_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/resize_fp16_grad.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_ResizeGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { float16_t ScalingFp16(size_t in_size, size_t out_size, bool align_corners) { return (align_corners && out_size > 1) ? (in_size - 1) / static_cast(out_size - 1) : in_size / static_cast(out_size); @@ -113,4 +113,4 @@ int ResizeGradCPUKernelFp16::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_ResizeGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/resize_fp16_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/resize_fp16_grad.h index 8d51a092..5f8d35f0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/resize_fp16_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/resize_fp16_grad.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ResizeGradCPUKernelFp16 : public LiteKernel { public: explicit ResizeGradCPUKernelFp16(OpParameter *parameter, const std::vector &inputs, @@ -33,6 +33,6 @@ class ResizeGradCPUKernelFp16 : public LiteKernel { int ExecuteInit(int task_id); int DoExecute(int task_id); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_RESIZE_FP16_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/strided_slice_fp16_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/strided_slice_fp16_grad.cc index ca0fb484..cdaa852c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/strided_slice_fp16_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/strided_slice_fp16_grad.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_StridedSliceGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int StridedSliceGradCPUKernelFp16::Prepare() { if (!InferShapeDone()) { return RET_OK; @@ -151,4 +151,4 @@ int StridedSliceGradCPUKernelFp16::DoExecute(int task_id) { } REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_StridedSliceGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/strided_slice_fp16_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/strided_slice_fp16_grad.h index 4c901547..abbdc472 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/strided_slice_fp16_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/strided_slice_fp16_grad.h @@ -21,7 +21,7 @@ #include "nnacl_c/fp16_grad/strided_slice_grad.h" #include "src/executor/kernel_exec.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class StridedSliceGradCPUKernelFp16 : public LiteKernel { public: StridedSliceGradCPUKernelFp16(OpParameter *parameter, const std::vector &inputs, @@ -44,6 +44,6 @@ class StridedSliceGradCPUKernelFp16 : public LiteKernel { StridedSliceParameter *param_; std::vector output_shape_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_STRIDED_SLICE_FP16_GRAD_H_ diff --git 
a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/unsorted_segment_sum_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/unsorted_segment_sum_fp16.cc index f6e1eefc..c49b04cf 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/unsorted_segment_sum_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/unsorted_segment_sum_fp16.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_UnsortedSegmentSum; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int UnsortedSegmentSumCPUKernelFp16::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C2NUM); CHECK_LESS_RETURN(out_tensors_.size(), C1NUM); @@ -101,4 +101,4 @@ int UnsortedSegmentSumCPUKernelFp16::DoExecute(int task_id) { REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_UnsortedSegmentSum, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/unsorted_segment_sum_fp16.h b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/unsorted_segment_sum_fp16.h index 65e11e0e..3861a92a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp16_grad/unsorted_segment_sum_fp16.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp16_grad/unsorted_segment_sum_fp16.h @@ -20,7 +20,7 @@ #include #include "src/executor/kernel_exec.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class UnsortedSegmentSumCPUKernelFp16 : public LiteKernel { public: UnsortedSegmentSumCPUKernelFp16(OpParameter *parameter, const std::vector &inputs, @@ -39,6 +39,6 @@ class UnsortedSegmentSumCPUKernelFp16 : public LiteKernel { private: }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP16_GRAD_UNSORTED_SEGMENT_SUM_FP16_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/adder_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/adder_fp32.cc index 10953861..28b0fe52 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/adder_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/adder_fp32.cc @@ -31,7 +31,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::Format; using mindspore::schema::PrimitiveType_AdderFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int AdderCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C2NUM); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -153,5 +153,5 @@ int AdderCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_AdderFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/adder_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/adder_fp32.h index b2b1a7e8..2c9980d7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/adder_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/adder_fp32.h @@ -23,7 +23,7 @@ #include "nnacl_c/op_base.h" #include "src/litert/kernel/cpu/fp32/convolution_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class AdderCPUKernel : public ConvolutionCPUKernel { public: AdderCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -37,7 +37,7 @@ class AdderCPUKernel : public ConvolutionCPUKernel { int Run() override; int RunImpl(int task_id) override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_ADDER_FP32_H_ diff --git 
a/mindspore-lite/src/litert/kernel/cpu/fp32/affine_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/affine_fp32.cc index a183abc9..abc5b342 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/affine_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/affine_fp32.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::lite::RET_PARAM_INVALID; using mindspore::schema::PrimitiveType_Affine; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int AffineFp32CPUKernel::DoActivation(lite::Tensor *tensor) { auto data = static_cast(tensor->MutableData()); int length = tensor->ElementsNum(); @@ -478,4 +478,4 @@ int AffineFp32CPUKernel::FullMatmulRun() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Affine, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/affine_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/affine_fp32.h index 917c0d8f..1657c9c8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/affine_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/affine_fp32.h @@ -22,7 +22,7 @@ #include "nnacl_c/affine_parameter.h" #include "nnacl_c/splice_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr auto kAffineMinInputNum = 2; constexpr auto kAffineMaxInputNum = 3; constexpr auto kAffineMaxOutputNum = 1; @@ -74,5 +74,5 @@ class AffineFp32CPUKernel : public LiteKernel { int splice_src_col_{0}; int splice_dst_col_{0}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_AFFINE_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/all_gather_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/all_gather_fp32.cc index 8f0ad2da..df5fb1f9 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/all_gather_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/all_gather_fp32.cc @@ -23,7 +23,7 @@ using mindspore::kernel::KERNEL_ARCH; using mindspore::lite::KernelRegistrar; using mindspore::schema::PrimitiveType_AllGather; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int AllGatherCPUKernel::Prepare() { MS_LOG(ERROR) << "unsupported AllGather kernel"; return lite::RET_NOT_SUPPORT; @@ -52,4 +52,4 @@ int AllGatherCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_AllGather, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/all_gather_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/all_gather_fp32.h index 80585bf0..4fd91cf7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/all_gather_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/all_gather_fp32.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/all_gather_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class AllGatherCPUKernel : public LiteKernel { public: AllGatherCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -38,6 +38,6 @@ class AllGatherCPUKernel : public LiteKernel { private: AllGatherParameter *param_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_ALL_GATHER_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/arithmetic_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/arithmetic_fp32.cc index 30ef80fa..e1601729 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/fp32/arithmetic_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/arithmetic_fp32.cc @@ -37,7 +37,7 @@ using mindspore::schema::PrimitiveType_RealDiv; using mindspore::schema::PrimitiveType_SquaredDifference; using mindspore::schema::PrimitiveType_SubFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ArithmeticCPUKernel::ReSize() { in_data_size_ = static_cast(lite::DataTypeSize(in_tensors_.front()->data_type())); out_data_size_ = static_cast(lite::DataTypeSize(out_tensors_.front()->data_type())); @@ -206,4 +206,4 @@ REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_FloorMod, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Eltwise, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_DivFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/arithmetic_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/arithmetic_fp32.h index 8d70bb59..43c2eb9a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/arithmetic_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/arithmetic_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/base/arithmetic_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ArithmeticCPUKernel : public ArithmeticBaseCPUKernel { public: ArithmeticCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -54,5 +54,5 @@ class ArithmeticCPUKernel : public ArithmeticBaseCPUKernel { ArithmeticFunc arithmetic_run_bool_{nullptr}; ArithmeticOptFunc arithmetic_opt_run_bool_{nullptr}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_ARITHMETIC_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/broadcast_to_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/broadcast_to_fp32.cc index 991a6157..2bde0bc7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/broadcast_to_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/broadcast_to_fp32.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_BroadcastTo; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int BroadcastToCPUKernel::ReSize() { auto input_shape = in_tensors_.at(0)->shape(); for (size_t i = 0; i < input_shape.size(); ++i) { @@ -79,4 +79,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BroadcastTo, LiteKernelCreato REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_BroadcastTo, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_BroadcastTo, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeUInt8, PrimitiveType_BroadcastTo, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/broadcast_to_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/broadcast_to_fp32.h index 76360efd..ff372e31 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/broadcast_to_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/broadcast_to_fp32.h @@ -21,7 +21,7 @@ #include "nnacl_c/base/broadcast_to.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class BroadcastToCPUKernel : public LiteKernel { public: BroadcastToCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -36,6 +36,6 @@ class BroadcastToCPUKernel : public LiteKernel { private: BroadcastShapeInfo 
shape_info_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_BROADCAST_TO_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/cast_for_x86_fp16.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/cast_for_x86_fp16.cc index d4db78f0..9f457c63 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/cast_for_x86_fp16.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/cast_for_x86_fp16.cc @@ -21,6 +21,6 @@ using mindspore::kernel::KERNEL_ARCH; using mindspore::lite::KernelRegistrar; using mindspore::schema::PrimitiveType_Cast; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Cast, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/cast_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/cast_fp32.cc index 08aec60c..139ffae6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/cast_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/cast_fp32.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Cast; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { int CastRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { if (cdata == nullptr) { @@ -221,4 +221,4 @@ REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Cast, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt64, PrimitiveType_Cast, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Cast, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/cast_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/cast_fp32.h index c90b2156..5933a90b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/cast_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/cast_fp32.h @@ -23,7 +23,7 @@ #include "nnacl_c/op_base.h" #include "nnacl_c/base/cast_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CastCPUKernel : public LiteKernel { public: CastCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -44,6 +44,6 @@ class CastCPUKernel : public LiteKernel { int stride_ = 0; int data_num_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CAST_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_1x1_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_1x1_fp32.cc index 138740c0..87d0031c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_1x1_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_1x1_fp32.cc @@ -20,7 +20,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { Convolution1x1CPUKernel::~Convolution1x1CPUKernel() { FreeTmpBuffer(); @@ -367,4 +367,4 @@ int Convolution1x1CPUKernel::MallocWeightBiasData() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_1x1_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_1x1_fp32.h index 15efa5b8..14f8cfc0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_1x1_fp32.h +++ 
b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_1x1_fp32.h @@ -29,7 +29,7 @@ #include "nnacl_c/matmul_parameter.h" #include "nnacl_c/fp32/matmul_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class Convolution1x1CPUKernel : public ConvolutionBaseCPUKernel { public: Convolution1x1CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -65,5 +65,5 @@ class Convolution1x1CPUKernel : public ConvolutionBaseCPUKernel { int row_tile_ = 0; int col_tile_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_1X1_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_delegate_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_delegate_fp32.cc index 9d26b96c..acfbcb66 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_delegate_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_delegate_fp32.cc @@ -47,7 +47,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Conv2DFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kMaxDwConvSWSize = 32; } // namespace @@ -410,4 +410,4 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector & } return kernel; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_delegate_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_delegate_fp32.h index 3fcbf754..5151f34a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_delegate_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_delegate_fp32.h @@ -23,7 +23,7 @@ #include "nnacl_c/op_base.h" using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDelegateCPUKernel : public LiteKernel { public: ConvolutionDelegateCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -134,6 +134,6 @@ class ConvolutionDelegateCPUKernel : public LiteKernel { bool input_const_{false}; bool weight_const_{false}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_DELEGATE_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_3x3_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_3x3_fp32.cc index bacd4d52..4328fbc4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_3x3_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_3x3_fp32.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ConvolutionDepthwise3x3CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C2NUM); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -160,5 +160,5 @@ int ConvolutionDepthwise3x3CPUKernel::MallocWeightBiasData() { memset(bias_data_, 0, c4 * sizeof(float)); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_3x3_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_3x3_fp32.h index fa3ed605..4f2a3617 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_3x3_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_3x3_fp32.h @@ -23,7 +23,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl_c/fp32/conv_depthwise_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDepthwise3x3CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwise3x3CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -45,6 +45,6 @@ class ConvolutionDepthwise3x3CPUKernel : public ConvolutionBaseCPUKernel { float *output_ptr_ = nullptr; float *buffer_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_DEPTHWISE_3X3_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_fp32.cc index 3ea5ac11..8b9553a4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_fp32.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ConvolutionDepthwiseCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C2NUM); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -215,4 +215,4 @@ int ConvolutionDepthwiseCPUKernel::MallocWeightBiasData() { memset(bias_data_, 0, channel * sizeof(float)); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_fp32.h index 8bdef907..4a0712d3 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_fp32.h @@ -23,7 +23,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl_c/fp32/conv_depthwise_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -63,6 +63,6 @@ class ConvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel { float *output_ptr_ = nullptr; ConvDwCalcParam *conv_dw_calc_param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_DEPTHWISE_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_indirect_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_indirect_fp32.cc index 0abb382a..07c7a55b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_indirect_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_indirect_fp32.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { ConvolutionDepthwiseIndirectCPUKernel::~ConvolutionDepthwiseIndirectCPUKernel() { if (zero_ptr_ != nullptr) { free(zero_ptr_); @@ -240,4 +240,4 @@ int ConvolutionDepthwiseIndirectCPUKernel::MallocWeightBiasData() { 
memset(zero_ptr_, 0, batch_flag * div_flag * sizeof(float)); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_indirect_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_indirect_fp32.h index ae08a9de..1e4b63e4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_indirect_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_indirect_fp32.h @@ -22,7 +22,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl_c/fp32/conv_depthwise_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDepthwiseIndirectCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseIndirectCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -49,6 +49,6 @@ class ConvolutionDepthwiseIndirectCPUKernel : public ConvolutionBaseCPUKernel { float *output_ptr_ = nullptr; float *packed_input_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_DEPTHWISE_INDIRECT_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_fp32.cc index 18ce636d..b310e56a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_fp32.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { ConvolutionDepthwiseSWCPUKernel::~ConvolutionDepthwiseSWCPUKernel() { if (sliding_ != nullptr) { delete sliding_; @@ -211,4 +211,4 @@ int ConvolutionDepthwiseSWCPUKernel::MallocWeightBiasData() { conv_param_->thread_num_ = MSMIN(thread_count_, OC4); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_fp32.h index ecc94cd9..6d109dbe 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_fp32.h @@ -22,7 +22,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl_c/fp32/conv_depthwise_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDepthwiseSWCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseSWCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -47,6 +47,6 @@ class ConvolutionDepthwiseSWCPUKernel : public ConvolutionBaseCPUKernel { float *packed_output_ = nullptr; bool need_align_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_DEPTHWISE_SLIDEWINDOW_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_x86_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_x86_fp32.cc index fbde537b..eff8a3c8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_x86_fp32.cc +++ 
b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_x86_fp32.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { ConvolutionDepthwiseSWCPUKernelX86::~ConvolutionDepthwiseSWCPUKernelX86() { if (sliding_ != nullptr) { delete sliding_; @@ -213,5 +213,5 @@ int ConvolutionDepthwiseSWCPUKernelX86::MallocWeightBiasData() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_x86_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_x86_fp32.h index d6b7ca98..0c2b5bb5 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_x86_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_depthwise_slidewindow_x86_fp32.h @@ -22,7 +22,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl_c/fp32/conv_depthwise_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDepthwiseSWCPUKernelX86 : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseSWCPUKernelX86(OpParameter *parameter, const std::vector &inputs, @@ -49,7 +49,7 @@ class ConvolutionDepthwiseSWCPUKernelX86 : public ConvolutionBaseCPUKernel { bool input_need_align_ = false; bool output_need_align_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_DEPTHWISE_SLIDEWINDOW_X86_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_fp32.cc index 6ce67127..a1338e15 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_fp32.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { #define CONV_MIN_CALC_BLOCK C1NUM #ifdef ENABLE_AVX #define OC_BLOCK C16NUM @@ -294,5 +294,5 @@ int ConvolutionCPUKernel::MallocWeightBiasData() { memset(bias_data_, 0, oc_block_num * sizeof(float)); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_fp32.h index 5ca88340..14bc834d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_fp32.h @@ -23,7 +23,7 @@ #include "nnacl_c/op_base.h" #include "src/litert/kernel/cpu/base/convolution_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -65,7 +65,7 @@ class ConvolutionCPUKernel : public ConvolutionBaseCPUKernel { float *col_major_input_ = nullptr; bool output_need_align_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm32_fp32.cc 
b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm32_fp32.cc index 1e4035eb..c9e55f2a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm32_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm32_fp32.cc @@ -16,11 +16,11 @@ #include "src/litert/kernel/cpu/fp32/convolution_im2col_arm32_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionIm2ColARM32CPUKernel::InitGlobalVariable() { oc_tile_ = C4NUM; row_tile_ = C12NUM; rowMajor2ColNMajorFunc = RowMajor2Col4Major; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm32_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm32_fp32.h index ad444d12..7e650b13 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm32_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm32_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionIm2ColARM32CPUKernel : public ConvolutionIm2ColBaseCPUKernel { public: ConvolutionIm2ColARM32CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -31,6 +31,6 @@ class ConvolutionIm2ColARM32CPUKernel : public ConvolutionIm2ColBaseCPUKernel { void InitGlobalVariable() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_IM2COL_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm64_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm64_fp32.cc index de407f71..15fd699c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm64_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm64_fp32.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionIm2ColARM64CPUKernel::InitGlobalVariable() { oc_tile_ = C8NUM; row_tile_ = C12NUM; @@ -46,4 +46,4 @@ int ConvolutionIm2ColARM64CPUKernel::RunImpl(int task_id) { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm64_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm64_fp32.h index 3dc83208..7c9e23a5 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm64_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_arm64_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionIm2ColARM64CPUKernel : public ConvolutionIm2ColBaseCPUKernel { public: ConvolutionIm2ColARM64CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -32,6 +32,6 @@ class ConvolutionIm2ColARM64CPUKernel : public ConvolutionIm2ColBaseCPUKernel { void InitGlobalVariable() override; int RunImpl(int task_id) override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_IM2COL_FP32_H_ diff --git 
a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx512_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx512_fp32.cc index 0b7ba386..63fba568 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx512_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx512_fp32.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionIm2ColAVX512CPUKernel::InitGlobalVariable() { oc_tile_ = C16NUM; row_tile_ = MSMIN(UP_DIV(conv_param_->output_h_ * conv_param_->output_w_, op_parameter_->thread_num_), C150NUM); @@ -129,4 +129,4 @@ int ConvolutionIm2ColAVX512CPUKernel::Run() { FreeTmpBuffer(); return ret; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx512_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx512_fp32.h index f16bee77..a882757e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx512_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx512_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionIm2ColAVX512CPUKernel : public ConvolutionIm2ColBaseCPUKernel { public: ConvolutionIm2ColAVX512CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -34,6 +34,6 @@ class ConvolutionIm2ColAVX512CPUKernel : public ConvolutionIm2ColBaseCPUKernel { int RunImpl(int task_id) override; int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_IM2COL_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx_fp32.cc index aaf7f597..6d341eeb 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx_fp32.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionIm2ColAVXCPUKernel::InitGlobalVariable() { oc_tile_ = C16NUM; row_tile_ = C6NUM; @@ -134,4 +134,4 @@ int ConvolutionIm2ColAVXCPUKernel::Run() { FreeTmpBuffer(); return ret; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx_fp32.h index 818516e6..a49f5660 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_avx_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionIm2ColAVXCPUKernel : public ConvolutionIm2ColBaseCPUKernel { public: ConvolutionIm2ColAVXCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -35,6 +35,6 @@ class ConvolutionIm2ColAVXCPUKernel : public ConvolutionIm2ColBaseCPUKernel { int Run() override; int RunImpl(int task_id) override; }; 
-} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_IM2COL_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.cc index 9484973a..7190b6bc 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { #define CONV_MIN_CALC_BLOCK C1NUM void ConvolutionIm2ColBaseCPUKernel::InitGlobalVariable() { @@ -240,4 +240,4 @@ int ConvolutionIm2ColBaseCPUKernel::MallocWeightBiasData() { memset(bias_data_, 0, oc_block_num * sizeof(float)); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.h index 70d00e87..175fca9d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.h @@ -22,7 +22,7 @@ #include "nnacl_c/op_base.h" #include "src/litert/kernel/cpu/base/convolution_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { using RowMajor2ColNMajorFunc = void (*)(const float *src_ptr, float *dst_ptr, int row, int col); int ConvolutionIm2ColImpl(void *cdata, int task_id, float lhs_scale, float rhs_scale); @@ -72,6 +72,6 @@ class ConvolutionIm2ColBaseCPUKernel : public ConvolutionBaseCPUKernel { int row_tile_ = C12NUM; // oc tile is C12NUM in C RowMajor2ColNMajorFunc rowMajor2ColNMajorFunc = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_IM2COL_BASE_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_fp32.cc index 908af5b0..a31b9ad2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_fp32.cc @@ -36,7 +36,7 @@ #endif #include "nnacl_c/intrinsics/ms_simd_cpu_info.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { LiteKernel *CreateConvolutionIm2ColCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::InnerContext *ctx, float *origin_weight, float *origin_bias) { @@ -82,4 +82,4 @@ LiteKernel *CreateConvolutionIm2ColCPUKernel(OpParameter *parameter, const std:: } return kernel; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_fp32.h index e1b22015..57345759 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_fp32.h @@ -22,9 +22,9 @@ #include "nnacl_c/op_base.h" #include "src/litert/kernel/cpu/base/convolution_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { LiteKernel *CreateConvolutionIm2ColCPUKernel(OpParameter *parameter, const std::vector 
&inputs, const std::vector &outputs, const lite::InnerContext *ctx, float *origin_weight, float *origin_bias); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_IM2COL_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_sse_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_sse_fp32.cc index 7d86b0cf..df81aeb3 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_sse_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_sse_fp32.cc @@ -16,11 +16,11 @@ #include "src/litert/kernel/cpu/fp32/convolution_im2col_sse_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionIm2ColSSECPUKernel::InitGlobalVariable() { oc_tile_ = C8NUM; row_tile_ = C4NUM; rowMajor2ColNMajorFunc = RowMajor2Col8Major; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_sse_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_sse_fp32.h index ccb3de77..fae6b212 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_sse_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_im2col_sse_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp32/convolution_im2col_base_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionIm2ColSSECPUKernel : public ConvolutionIm2ColBaseCPUKernel { public: ConvolutionIm2ColSSECPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -31,6 +31,6 @@ class ConvolutionIm2ColSSECPUKernel : public ConvolutionIm2ColBaseCPUKernel { void InitGlobalVariable() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_IM2COL_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_arm64_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_arm64_fp32.cc index 11138879..6189afe1 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_arm64_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_arm64_fp32.cc @@ -16,7 +16,7 @@ #include "src/litert/kernel/cpu/fp32/convolution_slidewindow_arm64_fp32.h" #include "nnacl_c/fp32/conv_sw_arm64_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionSWARM64CPUKernel::InitGlobalVariable() { oc_tile_ = C8NUM; oc_res_ = conv_param_->output_channel_ % oc_tile_; @@ -27,4 +27,4 @@ int ConvolutionSWARM64CPUKernel::RunImpl(int task_id) { output_data_, task_id, conv_param_, slidingWindow_param_); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_arm64_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_arm64_fp32.h index b2af75e2..3931375e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_arm64_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_arm64_fp32.h @@ -18,7 +18,7 @@ #include #include "src/litert/kernel/cpu/fp32/convolution_slidewindow_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionSWARM64CPUKernel : public ConvolutionSWCPUKernel { public: ConvolutionSWARM64CPUKernel(OpParameter 
*parameter, const std::vector &inputs, @@ -29,5 +29,5 @@ class ConvolutionSWARM64CPUKernel : public ConvolutionSWCPUKernel { void InitGlobalVariable() override; int RunImpl(int task_id) override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_FP32_CONVOLUTION_SLIDEWINDOW_ARM64_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_avx_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_avx_fp32.cc index 54dcd0a2..bebeadd2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_avx_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_avx_fp32.cc @@ -18,7 +18,7 @@ #include "nnacl_c/fp32/conv_common_fp32.h" #include "nnacl_c/fp32/conv_1x1_x86_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionSWAVXCPUKernel::InitGlobalVariable() { oc_tile_ = C8NUM; oc_res_ = conv_param_->output_channel_ % oc_tile_; @@ -39,5 +39,5 @@ int ConvolutionSWAVXCPUKernel::RunImpl(int task_id) { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // ENABLE_AVX diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_avx_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_avx_fp32.h index 606b411a..58956858 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_avx_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_avx_fp32.h @@ -19,7 +19,7 @@ #include #include "src/litert/kernel/cpu/fp32/convolution_slidewindow_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionSWAVXCPUKernel : public ConvolutionSWCPUKernel { public: ConvolutionSWAVXCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -30,6 +30,6 @@ class ConvolutionSWAVXCPUKernel : public ConvolutionSWCPUKernel { void InitGlobalVariable() override; int RunImpl(int task_id) override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // ENABLE_AVX #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_SLIDEWINDOW_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_fp32.cc index 4026cf85..393df0cf 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_fp32.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionSWCPUKernel::InitGlobalVariable() { oc_tile_ = C1NUM; oc_res_ = conv_param_->output_channel_ % oc_tile_; @@ -236,5 +236,5 @@ int ConvolutionSWCPUKernel::MallocWeightBiasData() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // ENABLE_AVX diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_fp32.h index 6f8f4080..ca7992a6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_slidewindow_fp32.h @@ -21,7 +21,7 @@ #include "nnacl_c/op_base.h" #include "src/litert/kernel/cpu/base/convolution_base.h" 
-namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionSWCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionSWCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -66,6 +66,6 @@ class ConvolutionSWCPUKernel : public ConvolutionBaseCPUKernel { float *input_data_ = nullptr; SlidingWindowParam *slidingWindow_param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // ENABLE_AVX #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_SLIDEWINDOW_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_sw_1x1_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_sw_1x1_fp32.cc index 728f81d5..48600b91 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_sw_1x1_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_sw_1x1_fp32.cc @@ -22,7 +22,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ConvolutionSW1x1CPUKernel::Prepare() { CHECK_NULL_RETURN(matmul_base_); matmul_base_->set_name(name_); @@ -43,4 +43,4 @@ int ConvolutionSW1x1CPUKernel::Run() { matmul_base_->set_workspace(workspace()); return matmul_base_->Run(); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_sw_1x1_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_sw_1x1_fp32.h index 071d21d3..f76afa70 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_sw_1x1_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_sw_1x1_fp32.h @@ -25,7 +25,7 @@ #include "src/litert/kernel/cpu/fp32/matmul_fp32_base.h" #include "src/litert/kernel/cpu/fp32/matmul_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionSW1x1CPUKernel : public LiteKernel { public: ConvolutionSW1x1CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -96,5 +96,5 @@ class ConvolutionSW1x1CPUKernel : public LiteKernel { float *origin_weight_ = nullptr; float *origin_bias_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_SW_1X1_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm32_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm32_fp32.cc index 2f22126d..2c74d423 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm32_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm32_fp32.cc @@ -16,10 +16,10 @@ #include "src/litert/kernel/cpu/fp32/convolution_winograd_arm32_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionWinogradARM32CPUKernel::InitGlobalVariable() { oc_block_ = C8NUM; tmp_data_tile_ = C4NUM; tile_num_ = C12NUM; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm32_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm32_fp32.h index d3cd9a21..8afec9ab 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm32_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm32_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp32/convolution_winograd_base_fp32.h" 
-namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionWinogradARM32CPUKernel : public ConvolutionWinogradBaseCPUKernel { public: ConvolutionWinogradARM32CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -30,5 +30,5 @@ class ConvolutionWinogradARM32CPUKernel : public ConvolutionWinogradBaseCPUKerne ~ConvolutionWinogradARM32CPUKernel() override {} void InitGlobalVariable() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_ARM32_FP32_CONVOLUTION_WINOGRAD_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm64_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm64_fp32.cc index ee8ea71c..aaa0de35 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm64_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm64_fp32.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionWinogradARM64CPUKernel::InitGlobalVariable() { oc_block_ = C8NUM; tmp_data_tile_ = C4NUM; @@ -52,4 +52,4 @@ int ConvolutionWinogradARM64CPUKernel::ConfigInputOutput() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm64_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm64_fp32.h index e72a0cbe..3a15e0de 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm64_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_arm64_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp32/convolution_winograd_base_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionWinogradARM64CPUKernel : public ConvolutionWinogradBaseCPUKernel { public: ConvolutionWinogradARM64CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -31,5 +31,5 @@ class ConvolutionWinogradARM64CPUKernel : public ConvolutionWinogradBaseCPUKerne void InitGlobalVariable() override; int ConfigInputOutput() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_ARM64_FP32_CONVOLUTION_WINOGRAD_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_avx_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_avx_fp32.cc index 1837425a..0f21ecad 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_avx_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_avx_fp32.cc @@ -24,10 +24,10 @@ using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionWinogradAVXCPUKernel::InitGlobalVariable() { oc_block_ = C16NUM; tmp_data_tile_ = C8NUM; tile_num_ = C12NUM; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_avx_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_avx_fp32.h index 90c0c5c4..a77c1bfb 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_avx_fp32.h +++ 
b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_avx_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp32/convolution_winograd_base_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionWinogradAVXCPUKernel : public ConvolutionWinogradBaseCPUKernel { public: ConvolutionWinogradAVXCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -30,5 +30,5 @@ class ConvolutionWinogradAVXCPUKernel : public ConvolutionWinogradBaseCPUKernel ~ConvolutionWinogradAVXCPUKernel() override {} void InitGlobalVariable() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_AVX_FP32_CONVOLUTION_WINOGRAD_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_base_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_base_fp32.cc index e77872c1..3f084175 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_base_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_base_fp32.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { #define CONV_MIN_CALC_BLOCK C1NUM void ConvolutionWinogradBaseCPUKernel::InitGlobalVariable() { oc_block_ = C8NUM; @@ -305,4 +305,4 @@ void ConvolutionWinogradBaseCPUKernel::PackWeight() { MS_ASSERT(origin_weight != nullptr); WinogradFilterTransform(reinterpret_cast(origin_weight), matrix_g_, matrix_gt_, oc_block_); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_base_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_base_fp32.h index 0968c500..b539830f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_base_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_base_fp32.h @@ -25,7 +25,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #define CONV_INPUT_UNIT_SIZE 8 -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionWinogradBaseCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionWinogradBaseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -90,5 +90,5 @@ class ConvolutionWinogradBaseCPUKernel : public ConvolutionBaseCPUKernel { TmpBufferAddress tmp_buffer_address_list_[5] = {nullptr}; TransFuncList trans_func_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_BASE_FP32_CONVOLUTION_WINOGRAD_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_fp32.cc index c6a2e2e1..eefdd54d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_fp32.cc @@ -33,7 +33,7 @@ #endif #include "nnacl_c/intrinsics/ms_simd_cpu_info.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { LiteKernel *CreateConvolutionWinogradCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::InnerContext *ctx, int out_unit, float *origin_weight, @@ -72,4 +72,4 @@ LiteKernel *CreateConvolutionWinogradCPUKernel(OpParameter *parameter, const std } return kernel; } -} 
// namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_fp32.h index 67c69734..be5f680e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_fp32.h @@ -22,10 +22,10 @@ #include "nnacl_c/op_base.h" #include "src/litert/kernel/cpu/base/convolution_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { LiteKernel *CreateConvolutionWinogradCPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::InnerContext *ctx, int out_unit, float *origin_weight, float *origin_bias); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CONVOLUTION_WINOGRAD_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_sse_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_sse_fp32.cc index b7a092a9..b642fec4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_sse_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_sse_fp32.cc @@ -16,10 +16,10 @@ #include "src/litert/kernel/cpu/fp32/convolution_winograd_sse_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionWinogradSSECPUKernel::InitGlobalVariable() { oc_block_ = C8NUM; tmp_data_tile_ = C4NUM; tile_num_ = C12NUM; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_sse_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_sse_fp32.h index 703221c8..3f3a1af0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_sse_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/convolution_winograd_sse_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp32/convolution_winograd_base_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionWinogradSSECPUKernel : public ConvolutionWinogradBaseCPUKernel { public: ConvolutionWinogradSSECPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -30,5 +30,5 @@ class ConvolutionWinogradSSECPUKernel : public ConvolutionWinogradBaseCPUKernel ~ConvolutionWinogradSSECPUKernel() override {} void InitGlobalVariable() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_SSE_FP32_CONVOLUTION_WINOGRAD_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/cumsum_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/cumsum_fp32.cc index 969ecc6c..60557c26 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/cumsum_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/cumsum_fp32.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_CumSum; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { int CumsumLaunch(void *cdata, int task_id, float lhs_scale, float rhs_scale) { if (cdata == nullptr) { @@ -146,4 +146,4 @@ int CumSumCPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_CumSum, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_CumSum, LiteKernelCreator) -} // namespace mindspore::kernel +} // 
namespace mindspore::kernel +} //
namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/cumsum_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/cumsum_fp32.h index d26af0b5..e21c4cb8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/cumsum_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/cumsum_fp32.h @@ -21,7 +21,7 @@ #include "nnacl_c/cumsum_parameter.h" #include "src/executor/kernel_exec.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CumSumCPUKernel : public LiteKernel { public: CumSumCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -44,5 +44,5 @@ class CumSumCPUKernel : public LiteKernel { int unit_ = 1; CumSumParameter *param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_CUMSUM_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/custom_gru_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/custom_gru_fp32.cc index 1b9d7e6d..53b8b329 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/custom_gru_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/custom_gru_fp32.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NOT_SUPPORT; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { CustomGruCPUKernel::~CustomGruCPUKernel() { if (weight_in_) { lite::PackWeightManager::GetInstance()->Free(weight_in_); @@ -247,5 +247,5 @@ int CustomGruCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimType_Inner_CustomGru, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/custom_gru_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/custom_gru_fp32.h index 75df5165..e8213987 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/custom_gru_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/custom_gru_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CustomGruCPUKernel : public LiteKernel { public: CustomGruCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -46,6 +46,6 @@ class CustomGruCPUKernel : public LiteKernel { void *init_h_{nullptr}; void *run_buffer_{nullptr}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_FP32_CUSTOM_GRU_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_depthwise_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_depthwise_fp32.cc index 35dcb371..7768ebb7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_depthwise_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_depthwise_fp32.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { DeconvolutionDepthwiseCPUKernel::~DeconvolutionDepthwiseCPUKernel() { if (sliding_ != nullptr) { delete sliding_; @@ -256,5 +256,5 @@ void DeconvolutionDepthwiseCPUKernel::FreePackedInputOutput() { packed_output_ = nullptr; } } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_depthwise_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_depthwise_fp32.h index de8298cb..32ec9f8f 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_depthwise_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_depthwise_fp32.h @@ -23,7 +23,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl_c/fp32/conv_depthwise_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DeconvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel { public: DeconvolutionDepthwiseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -49,7 +49,7 @@ class DeconvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel { float *packed_output_ = nullptr; bool need_align_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_DECONVOLUTION_DEPTHWISE_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_fp32.cc index b9fcfb9a..c9474270 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_fp32.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Conv2dTransposeFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { DeConvolutionCPUKernel::~DeConvolutionCPUKernel() { if (matmul_param_ != nullptr) { delete matmul_param_; @@ -391,5 +391,5 @@ kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Conv2dTransposeFusion, CpuDeConvFp32KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_fp32.h index af0579a7..4396204c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_fp32.h @@ -28,7 +28,7 @@ #include "nnacl_c/fp32/deconv_fp32.h" #include "nnacl_c/fp32/matmul_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { #define DECONV_WINOGRAD_MAX 2000 class DeConvolutionCPUKernel : public ConvolutionBaseCPUKernel { public: @@ -65,6 +65,6 @@ class DeConvolutionCPUKernel : public ConvolutionBaseCPUKernel { float *input_ptr_ = nullptr; float *output_ptr_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_DECONVOLUTION_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_winograd_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_winograd_fp32.cc index fdbb9ac6..ec4d1cc2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_winograd_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_winograd_fp32.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { const int kDeconvWinogradMaxPixel = 3145728; DeConvolutionWinogradCPUKernel::~DeConvolutionWinogradCPUKernel() { FreeResizeBuf(); @@ -595,5 +595,5 @@ int DeConvolutionWinogradCPUKernel::Run() { FreeRunBuf(); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_winograd_fp32.h 
b/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_winograd_fp32.h index 1aff6a33..2c3fafa6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_winograd_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/deconvolution_winograd_fp32.h @@ -28,7 +28,7 @@ #include "nnacl_c/fp32/deconv_winograd_fp32.h" #include "src/litert/kernel/cpu/base/convolution_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DeConvolutionWinogradCPUKernel : public ConvolutionBaseCPUKernel { public: DeConvolutionWinogradCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -65,6 +65,6 @@ class DeConvolutionWinogradCPUKernel : public ConvolutionBaseCPUKernel { int thread_stride_hw_ = 0; bool valid_weight_shape_ = true; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_DECONVOLUTION_WINOGRAD_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/detection_post_process_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/detection_post_process_fp32.cc index b29aa3fd..29257a0a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/detection_post_process_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/detection_post_process_fp32.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_DetectionPostProcess; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int DetectionPostProcessCPUKernel::GetInputData() { CHECK_LESS_RETURN(in_tensors_.size(), C2NUM); if ((in_tensors_.at(0)->data_type() != kNumberTypeFloat32 && in_tensors_.at(0)->data_type() != kNumberTypeFloat) || @@ -41,4 +41,4 @@ int DetectionPostProcessCPUKernel::GetInputData() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_DetectionPostProcess, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/detection_post_process_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/detection_post_process_fp32.h index d641c8c0..46b8feb5 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/detection_post_process_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/detection_post_process_fp32.h @@ -24,7 +24,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DetectionPostProcessCPUKernel : public DetectionPostProcessBaseCPUKernel { public: DetectionPostProcessCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -35,5 +35,5 @@ class DetectionPostProcessCPUKernel : public DetectionPostProcessBaseCPUKernel { private: int GetInputData() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_DETECTION_POST_PROCESS_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/embedding_lookup_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/embedding_lookup_fp32.cc index 045366ec..87c85dd7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/embedding_lookup_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/embedding_lookup_fp32.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_EmbeddingLookupFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int EmbeddingLookupCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C2NUM); CHECK_LESS_RETURN(out_tensors_.size(), 
1); @@ -124,4 +124,4 @@ void EmbeddingLookupCPUKernel::FreeRunBuff() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_EmbeddingLookupFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/embedding_lookup_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/embedding_lookup_fp32.h index 6813374c..5c6a9aa4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/embedding_lookup_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/embedding_lookup_fp32.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/fp32/embedding_lookup_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class EmbeddingLookupCPUKernel : public LiteKernel { public: explicit EmbeddingLookupCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -41,6 +41,6 @@ class EmbeddingLookupCPUKernel : public LiteKernel { EmbeddingLookupParameter *param_ = nullptr; float *input_addr_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_EMBEDDING_LOOKUP_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/glu_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/glu_fp32.cc index c97df7cc..94bea238 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/glu_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/glu_fp32.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_GLU; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { const int kGluBranchNum = 2; int GluCPUKernel::MallocTmpBuffer() { FreeTmpBuffer(); @@ -197,4 +197,4 @@ int GluCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_GLU, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/glu_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/glu_fp32.h index 903e8fe7..51aa2d09 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/glu_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/glu_fp32.h @@ -26,7 +26,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr size_t kSplitNum = 2; class GluCPUKernel : public LiteKernel { @@ -62,6 +62,6 @@ class GluCPUKernel : public LiteKernel { int usable_thread_num_ = 0; int num_unit_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GLU_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.cc index 62347c45..fe8b366f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.cc @@ -20,7 +20,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int GroupConvolutionFp32CPUKernel::Separate(const int &task_id) const { auto plane_step = UP_DIV(in_plane_, in_thread_num_); MS_CHECK_INT_MUL_NOT_OVERFLOW(plane_step, task_id, RET_ERROR); @@ -136,4 +136,4 @@ int GroupConvolutionFp32CPUKernel::Prepare() { } return GroupConvolutionBaseCPUKernel::Prepare(); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git 
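
Every hunk in this patch makes the same change: kernel code that previously opened and closed namespace mindspore::kernel is reopened and closed as the nested namespace mindspore::lite::kernel. The short sketch below is illustrative only and is not part of the patch; ExampleKernel and the compatibility alias are hypothetical names. It shows the C++17 nested-namespace definition syntax these files rely on, and one way callers that still spell mindspore::kernel::... could be kept compiling during such a migration, assuming no real mindspore::kernel namespace remains defined elsewhere.

    // Illustrative sketch only: ExampleKernel and the alias below are
    // hypothetical and do not exist in MindSpore Lite.
    #include <iostream>

    // C++17 nested namespace definition, as used by the
    // "+namespace mindspore::lite::kernel {" lines in this patch.
    namespace mindspore::lite::kernel {
    class ExampleKernel {
     public:
      int Run() const { return 0; }  // stand-in for a real kernel's Run()
    };
    }  // namespace mindspore::lite::kernel

    // Optional compatibility alias: old spellings such as
    // mindspore::kernel::ExampleKernel keep compiling while callers migrate.
    // Only valid if no namespace mindspore::kernel is defined elsewhere.
    namespace mindspore {
    namespace kernel = ::mindspore::lite::kernel;
    }  // namespace mindspore

    int main() {
      mindspore::kernel::ExampleKernel k;  // resolves through the alias
      std::cout << k.Run() << std::endl;
      return 0;
    }

Because the enclosing namespace is changed consistently at both the opening brace and the closing comment of each file, the kernel bodies themselves need no edits, which is why each hunk here touches only the namespace lines.
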
a/mindspore-lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.h index de04f902..e8a8b4f7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/group_convolution_fp32.h @@ -23,7 +23,7 @@ #include "nnacl_c/op_base.h" #include "src/litert/kernel/cpu/base/group_convolution_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class GroupConvolutionFp32CPUKernel : public GroupConvolutionBaseCPUKernel { public: GroupConvolutionFp32CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -46,6 +46,6 @@ class GroupConvolutionFp32CPUKernel : public GroupConvolutionBaseCPUKernel { float *sub_out_src_ = nullptr; float *sub_out_dst_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GROUP_CONVOLUTION_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/gru_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/gru_fp32.cc index 7c7a8720..2cf9ee2b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/gru_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/gru_fp32.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_GRU; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void GruCPUKernel::FreeTmpBuffer() { if (weight_g_ptr_ != nullptr) { free(weight_g_ptr_); @@ -294,4 +294,4 @@ int GruCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_GRU, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/gru_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/gru_fp32.h index 0736f4ff..f73abf8c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/gru_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/gru_fp32.h @@ -19,7 +19,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/gru_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class GruCPUKernel : public LiteKernel { public: GruCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -63,6 +63,6 @@ class GruCPUKernel : public LiteKernel { bool is_vec_ = false; GruParameter *gru_param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRU_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/instance_norm_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/instance_norm_fp32.cc index 83765b80..c83f272d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/instance_norm_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/instance_norm_fp32.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_InstanceNorm; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int InstanceNormCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), DIMENSION_3D); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -140,4 +140,4 @@ int InstanceNormCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_InstanceNorm, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/instance_norm_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/instance_norm_fp32.h index 
1ce9cec6..5773d455 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/instance_norm_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/instance_norm_fp32.h @@ -21,7 +21,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class InstanceNormCPUKernel : public LiteKernel { public: InstanceNormCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -52,6 +52,6 @@ class InstanceNormCPUKernel : public LiteKernel { float *beta_data_ = nullptr; bool input_pack_to_nc4hw4_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_INSTANCE_NORM_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/invert_permutation_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/invert_permutation_fp32.cc index f15019d4..1acc84d4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/invert_permutation_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/invert_permutation_fp32.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_InvertPermutation; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int InvertPermutationCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -70,4 +70,4 @@ int InvertPermutationCPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_InvertPermutation, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_InvertPermutation, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_InvertPermutation, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/invert_permutation_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/invert_permutation_fp32.h index 9a7cd9cb..9d756b9d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/invert_permutation_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/invert_permutation_fp32.h @@ -22,7 +22,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class InvertPermutationCPUKernel : public LiteKernel { public: InvertPermutationCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -35,6 +35,6 @@ class InvertPermutationCPUKernel : public LiteKernel { int ReSize() override; int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_INVERT_PERMUTATION_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/l2_norm_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/l2_norm_fp32.cc index 67b2771a..1abb720f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/l2_norm_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/l2_norm_fp32.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_L2NormalizeFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { const int kMaxThreadNum = 8; } @@ -203,4 +203,4 @@ int L2NormCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_L2NormalizeFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/l2_norm_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/l2_norm_fp32.h index eb09b3b6..67c9b363 
100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/l2_norm_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/l2_norm_fp32.h @@ -25,7 +25,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class L2NormCPUKernel : public LiteKernel { public: L2NormCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -54,6 +54,6 @@ class L2NormCPUKernel : public LiteKernel { float *output_ptr_ = nullptr; float *tmp_sum_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_L2_NORM_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_fp32.cc index aef6a970..76404ebc 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_fp32.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_LSTM; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr size_t kMindirInputTensorNum = 4; } @@ -55,4 +55,4 @@ LiteKernel *LstmFp32KernelCreator(const std::vector &inputs, con return kernel; } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LSTM, LstmFp32KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_fp32_base.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_fp32_base.cc index d5975a8d..75337523 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_fp32_base.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_fp32_base.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr size_t kMindirInputTensorNum = 4; constexpr int kGateNum = 4; @@ -395,4 +395,4 @@ void LstmFp32BaseCPUKernel::LstmBackwardLoop(float *buffer[]) { LstmUnidirectional(backward_output, backward_weight_h, backward_state_bias, backward_hidden_state, backward_cell_state, backward_weight_project, intermediate_states, buffer, true); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_fp32_base.h b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_fp32_base.h index 2f96c661..71877c17 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_fp32_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_fp32_base.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/fp32/lstm_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class LstmFp32BaseCPUKernel : public LiteKernel { public: LstmFp32BaseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -73,6 +73,6 @@ class LstmFp32BaseCPUKernel : public LiteKernel { float *buffer_forward_[C9NUM] = {nullptr}; float *buffer_backward_[C9NUM] = {nullptr}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_FP32_LSTM_FP32_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_mindir_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_mindir_fp32.cc index 97ccf931..526dd752 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_mindir_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_mindir_fp32.cc @@ -17,7 
+17,7 @@ #include "src/litert/kernel/cpu/fp32/lstm_mindir_fp32.h" #include "nnacl_c/fp32/pack_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kInputGateIndex = 0; constexpr int kTempHiddenOutputIndex = 8; @@ -263,4 +263,4 @@ void LstmMindirFp32CPUKernel::RecordStates(const float *hidden_state, float *cel stride += seq_stride; memcpy(states + stride, cell_gate, state_size * sizeof(float)); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_mindir_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_mindir_fp32.h index 1704b7e3..a7673df6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_mindir_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_mindir_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp32/lstm_fp32_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { /* * 1. LSTM without project, output_size = hidden_size * h_init: second input, shape is [bidirectional, batch_size, hidden_size] @@ -58,6 +58,6 @@ class LstmMindirFp32CPUKernel : public LstmFp32BaseCPUKernel { float *forget_gate, const float *cell_gate, float *intermediate_states, int step); bool gpu_orig_state_{false}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_FP32_LSTM_MINDIR_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_non_mindir_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_non_mindir_fp32.cc index 317ea2cf..3f091412 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_non_mindir_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_non_mindir_fp32.cc @@ -17,7 +17,7 @@ #include "src/litert/kernel/cpu/fp32/lstm_non_mindir_fp32.h" #include "nnacl_c/fp32/pack_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kInputGateIndex = 0; constexpr int kGateNum = 4; @@ -170,4 +170,4 @@ void LstmNonMindirFp32CPUKernel::LstmUnidirectional(float *output, const float * weight_project, hidden_state, cell_state, buffer, lstm_param_); } } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_non_mindir_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_non_mindir_fp32.h index e18d06b9..b79a373f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_non_mindir_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/lstm_non_mindir_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/fp32/lstm_fp32_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { /* * 1. 
LSTM without project, output_size = hidden_size * weight_ih: second input, shape is [bidirectional, 4 * hidden_size, input_size] @@ -56,6 +56,6 @@ class LstmNonMindirFp32CPUKernel : public LstmFp32BaseCPUKernel { float *cell_state, const float *weight_project, float *intermediate_states, float *buffer[], bool is_backward) override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_FP32_LSTM_NON_MINDIR_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32.cc index 42cea273..e6480aa5 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32.cc @@ -48,7 +48,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_MatMulFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int MatmulCPUKernel::Prepare() { CHECK_NULL_RETURN(matmul_base_); matmul_base_->set_name(name_); @@ -115,4 +115,4 @@ int MatmulCPUKernel::PreparePackedWeight(const lite::Tensor *tensor) { matmul_base_->SetWeightIsPacked(true); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32.h index 1f0f3403..0e75df04 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32.h @@ -21,7 +21,7 @@ #include "nnacl_c/matmul_parameter.h" #include "src/litert/kernel/cpu/fp32/matmul_fp32_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { MatmulFp32BaseCPUKernel *CreateMatmulFp32CPUKernel(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::InnerContext *ctx); @@ -93,5 +93,5 @@ class MatmulCPUKernel : public LiteKernel { private: MatmulFp32BaseCPUKernel *matmul_base_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_MATMUL_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm32.cc index 55cd42fb..235af37c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm32.cc @@ -20,7 +20,7 @@ #include "nnacl_c/fp32/matmul_fp32.h" #include "nnacl_c/fp32/pack_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void MatmulFp32ARM32CPUKernel::InitGlobalVariable() { matrix_a_.need_pack = true; matrix_b_.need_pack = true; @@ -101,5 +101,5 @@ int MatmulFp32ARM32CPUKernel::ParallelRunByOC(int task_id) const { } bool MatmulFp32ARM32CPUKernel::CheckThreadCuttingByRow() { return false; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm32.h index 95d0200c..da2b199f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm32.h @@ -19,7 +19,7 @@ #ifdef ENABLE_ARM32 #include #include "src/litert/kernel/cpu/fp32/matmul_fp32_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MatmulFp32ARM32CPUKernel : public 
MatmulFp32BaseCPUKernel { public: MatmulFp32ARM32CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -36,6 +36,6 @@ class MatmulFp32ARM32CPUKernel : public MatmulFp32BaseCPUKernel { int ParallelRunByOC(int task_id) const override; bool CheckThreadCuttingByRow() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_MATMUL_FP32_ARM32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm64.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm64.cc index 902c01fc..ba762da8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm64.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm64.cc @@ -22,7 +22,7 @@ #include "nnacl_c/fp32/pack_fp32.h" #include "nnacl_c/fp32/pack_fp32_opt.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int64_t kPackAMinUnitNum = 1 << 13; } // namespace @@ -171,5 +171,5 @@ bool MatmulFp32ARM64CPUKernel::CheckThreadCuttingByRow() { } return false; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm64.h b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm64.h index 021ba6e2..031f82a4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm64.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_arm64.h @@ -20,7 +20,7 @@ #ifdef ENABLE_ARM64 #include #include "src/litert/kernel/cpu/fp32/matmul_fp32_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MatmulFp32ARM64CPUKernel : public MatmulFp32BaseCPUKernel { public: MatmulFp32ARM64CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -37,7 +37,7 @@ class MatmulFp32ARM64CPUKernel : public MatmulFp32BaseCPUKernel { int ParallelRunByOC(int task_id) const override; bool CheckThreadCuttingByRow() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_MATMUL_FP32_ARM64_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx.cc index 401bff22..855b2b76 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx.cc @@ -20,7 +20,7 @@ #include "nnacl_c/fp32/matmul_fp32.h" #include "nnacl_c/fp32/pack_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void MatmulFp32AVXCPUKernel::InitGlobalVariable() { matrix_a_.need_pack = true; matrix_b_.need_pack = true; @@ -152,5 +152,5 @@ bool MatmulFp32AVXCPUKernel::CheckThreadCuttingByRow() { return MSMIN(row_num_ / row_min_unit_, op_parameter_->thread_num_) > MSMIN(col_step_ / col_min_unit_, op_parameter_->thread_num_); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx.h b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx.h index 0978df91..76f537f7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx.h @@ -20,7 +20,7 @@ #if defined(ENABLE_AVX) #include #include "src/litert/kernel/cpu/fp32/matmul_fp32_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MatmulFp32AVXCPUKernel : public MatmulFp32BaseCPUKernel { public: 
MatmulFp32AVXCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -38,7 +38,7 @@ class MatmulFp32AVXCPUKernel : public MatmulFp32BaseCPUKernel { bool CheckThreadCuttingByRow() override; bool SupportMulBatchCuttingByRow() { return true; } }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_MATMUL_FP32_AVX_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx512.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx512.cc index bd96027f..44055e9d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx512.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx512.cc @@ -22,7 +22,7 @@ #include "nnacl_c/fp32/matmul_fp32.h" #include "nnacl_c/fp32/pack_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { size_t min_calc_cost_ = 1 * 6 * 64 * 64; } @@ -731,5 +731,5 @@ bool MatmulFp32AVX512CPUKernel::CheckThreadCuttingByRow() { } return MSMIN(row_num_ / row_min_unit_, thread_num_) > MSMIN(col_step_ / col_min_unit_, thread_num_); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx512.h b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx512.h index 8bc1afbf..0ff7482f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx512.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_avx512.h @@ -20,7 +20,7 @@ #ifdef ENABLE_AVX512 #include #include "src/litert/kernel/cpu/fp32/matmul_fp32_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { struct MatmulSlice { int row_s_ = 0; int row_e_ = 0; @@ -58,7 +58,7 @@ class MatmulFp32AVX512CPUKernel : public MatmulFp32BaseCPUKernel { std::vector> matmul_slice_set_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_MATMUL_FP32_AVX512_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_base.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_base.cc index 90d365b8..d46ec4ad 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_base.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_base.cc @@ -30,7 +30,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_MatMulFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int MatmulRun(void *cdata, int task_id, float, float) { CHECK_NULL_RETURN(cdata); auto op = reinterpret_cast(cdata); @@ -853,4 +853,4 @@ int MatmulFp32BaseCPUKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_base.h b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_base.h index f1ac4f11..cb5f2182 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_base.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_base.h @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { using MatrixPackFun = void (*)(const float *src_ptr, float *dst_ptr, int row, int col, int start_row, int end_row); using GemmIsNotPackFun = void (*)(const float *a, const float *b, float *c, const float *bias, int m, int k, int 
act_type); @@ -147,5 +147,5 @@ class MatmulFp32BaseCPUKernel : public LiteKernel { bool is_sharing_pack_ = true; bool weight_is_packed_{false}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_MATMUL_FP32_BASE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_sse.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_sse.cc index 996e968d..4c09f718 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_sse.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_sse.cc @@ -20,7 +20,7 @@ #include "nnacl_c/fp32/matmul_fp32.h" #include "nnacl_c/fp32/pack_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void MatmulFp32SSECPUKernel::InitGlobalVariable() { matrix_a_.need_pack = true; matrix_b_.need_pack = true; @@ -101,5 +101,5 @@ int MatmulFp32SSECPUKernel::ParallelRunByOC(int task_id) const { } bool MatmulFp32SSECPUKernel::CheckThreadCuttingByRow() { return false; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_sse.h b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_sse.h index 5484336b..2e53a502 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_sse.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/matmul_fp32_sse.h @@ -20,7 +20,7 @@ #if defined(ENABLE_SSE) #include #include "src/litert/kernel/cpu/fp32/matmul_fp32_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MatmulFp32SSECPUKernel : public MatmulFp32BaseCPUKernel { public: MatmulFp32SSECPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -37,7 +37,7 @@ class MatmulFp32SSECPUKernel : public MatmulFp32BaseCPUKernel { int ParallelRunByOC(int task_id) const override; bool CheckThreadCuttingByRow() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_MATMUL_FP32_SSE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/non_max_suppression_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/non_max_suppression_fp32.cc index 5c31033f..fda3ad64 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/non_max_suppression_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/non_max_suppression_fp32.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::schema::PrimitiveType_NonMaxSuppression; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr size_t kMinInputsSize = 2; constexpr size_t kMaxInputsSize = 5; @@ -256,4 +256,4 @@ int NonMaxSuppressionCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_NonMaxSuppression, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/non_max_suppression_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/non_max_suppression_fp32.h index 0cce8151..06b12756 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/non_max_suppression_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/non_max_suppression_fp32.h @@ -25,7 +25,7 @@ using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class NonMaxSuppressionCPUKernel : public LiteKernel { public: NonMaxSuppressionCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -100,6 +100,6 @@ class NMSBox { float 
x2_; float area_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_NON_MAX_SUPPRESSION_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/cast_gather_reduce_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/cast_gather_reduce_fp32.cc index 5a937d59..9033ff7e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/cast_gather_reduce_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/cast_gather_reduce_fp32.cc @@ -26,7 +26,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int CastGatherReduceFusionCPUKernel::Prepare() { if (!InferShapeDone()) { return RET_OK; @@ -92,4 +92,4 @@ REG_KERNEL(kCPU, kNumberTypeInt32, PrimType_Inner_CastGatherReduceFusion, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt64, PrimType_Inner_CastGatherReduceFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/cast_gather_reduce_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/cast_gather_reduce_fp32.h index 9b6084dc..06d8f844 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/cast_gather_reduce_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/cast_gather_reduce_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CastGatherReduceFusionCPUKernel : public LiteKernel { public: CastGatherReduceFusionCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -43,6 +43,6 @@ class CastGatherReduceFusionCPUKernel : public LiteKernel { size_t axis_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_ONLINE_FUSION_CAST_GATHER_REDUCE_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/reduce_concat_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/reduce_concat_fp32.cc index 76a7b698..0b0d986c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/reduce_concat_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/reduce_concat_fp32.cc @@ -27,7 +27,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ReduceConcatFusionCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -79,4 +79,4 @@ int ReduceConcatFusionCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimType_Inner_ReduceConcatFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/reduce_concat_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/reduce_concat_fp32.h index 0b136f94..095722f3 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/reduce_concat_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/reduce_concat_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ReduceConcatFusionCPUKernel : public LiteKernel { public: 
ReduceConcatFusionCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -40,6 +40,6 @@ class ReduceConcatFusionCPUKernel : public LiteKernel { int64_t inner_tile_ = 0; int64_t batch_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_ONLINE_FUSION_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/split_reduce_concat_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/split_reduce_concat_fp32.cc index d6f00fb9..1a78a118 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/split_reduce_concat_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/split_reduce_concat_fp32.cc @@ -27,7 +27,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SplitReduceConcatFusionCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -97,4 +97,4 @@ int SplitReduceConcatFusionCPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeFloat32, PrimType_Inner_SplitReduceConcatFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/split_reduce_concat_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/split_reduce_concat_fp32.h index 376cd7c4..fd63cedd 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/split_reduce_concat_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/online_fusion/split_reduce_concat_fp32.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/split_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SplitReduceConcatFusionCPUKernel : public LiteKernel { public: SplitReduceConcatFusionCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -49,6 +49,6 @@ class SplitReduceConcatFusionCPUKernel : public LiteKernel { int64_t mid_len_ = 0; size_t axis_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_ONLINE_FUSION_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/reduce_scatter_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/reduce_scatter_fp32.cc index 4e72473c..98b01699 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/reduce_scatter_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/reduce_scatter_fp32.cc @@ -23,7 +23,7 @@ using mindspore::kernel::KERNEL_ARCH; using mindspore::lite::KernelRegistrar; using mindspore::schema::PrimitiveType_ReduceScatter; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ReduceScatterCPUKernel::Prepare() { MS_LOG(ERROR) << "unsupported ReduceScatter kernel"; return lite::RET_NOT_SUPPORT; @@ -98,4 +98,4 @@ int ReduceScatterCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ReduceScatter, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/reduce_scatter_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/reduce_scatter_fp32.h index 6dd140fc..b3361001 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/reduce_scatter_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/reduce_scatter_fp32.h @@ -22,7 +22,7 @@ #include "src/litert/lite_kernel.h" #include 
"nnacl_c/reduce_scatter_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ReduceScatterCPUKernel : public LiteKernel { public: ReduceScatterCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -43,6 +43,6 @@ class ReduceScatterCPUKernel : public LiteKernel { ReduceScatterParameter *param_; TypeId data_type_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_REDUCE_SCATTER_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/relative_position_attention_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/relative_position_attention_fp32.cc index 1bb57069..5f7a2882 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/relative_position_attention_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/relative_position_attention_fp32.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { RelativePositionAttentionCPUKernel::~RelativePositionAttentionCPUKernel() { FreeAllPackData(); } namespace { @@ -753,4 +753,4 @@ int RelativePositionAttentionCPUKernel::Run() { FreePackedRunBuffers(); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/relative_position_attention_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/relative_position_attention_fp32.h index 1ad095d4..20f5d5f9 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/relative_position_attention_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/relative_position_attention_fp32.h @@ -22,7 +22,7 @@ #include "nnacl_c/fp32/attention_fp32.h" #include "nnacl_c/matmul_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { // inputs: 0:Q 1:K 2:V 3:P 4:WQ 5:WK 6:WV 7:WP 8:PU 9:PV 10:WO 11:BQ 12:BK 13:BV 14:BO 15:output // if use_bias == true: has BQ BK BV BO inputs class RelativePositionAttentionCPUKernel : public LiteKernel { @@ -152,6 +152,6 @@ class RelativePositionAttentionCPUKernel : public LiteKernel { RelativePositionAttentionParameter *param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_RELATIVE_POSITION_ATTENTION_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/resize_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/resize_fp32.cc index 63038d3e..e0a74e3f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/resize_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/resize_fp32.cc @@ -31,7 +31,7 @@ using mindspore::schema::CoordinateTransformMode_ASYMMETRIC; using mindspore::schema::CoordinateTransformMode_HALF_PIXEL; using mindspore::schema::PrimitiveType_Resize; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kResizeSizeDouble = 2; } // namespace @@ -267,4 +267,4 @@ int ResizeCPUKernel::SelectCalculatorFunc() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Resize, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/resize_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/resize_fp32.h index a38309d6..1c35bbc6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/resize_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/resize_fp32.h @@ -56,7 +56,7 @@ struct 
ResizeCoordinate { } }; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ResizeCPUKernel : public ResizeBaseCPUKernel { public: ResizeCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -83,6 +83,6 @@ class ResizeCPUKernel : public ResizeBaseCPUKernel { void *line_buffer_ = nullptr; CalculateOriginalCoordinate calculate_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_RESIZE_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/reverse_sequence_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/reverse_sequence_fp32.cc index 389800fc..630bbf25 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/reverse_sequence_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/reverse_sequence_fp32.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_ReverseSequence; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ReverseSequenceCPUKernel::Prepare() { MS_CHECK_TRUE_RET(in_tensors_.size() == kInputSize1, RET_ERROR); MS_CHECK_TRUE_RET(out_tensors_.size() == 1, RET_ERROR); @@ -113,4 +113,4 @@ int ReverseSequenceCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ReverseSequence, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/reverse_sequence_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/reverse_sequence_fp32.h index 5960b370..b041ae69 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/reverse_sequence_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/reverse_sequence_fp32.h @@ -20,7 +20,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/fp32/reverse_sequence_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ReverseSequenceCPUKernel : public LiteKernel { public: ReverseSequenceCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -37,6 +37,6 @@ class ReverseSequenceCPUKernel : public LiteKernel { int CalcCountPreAxis(const std::vector shape, int axis) const; int CalcCountAfterAxis(const std::vector shape, int axis) const; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_REVERSE_SEQUENCE_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/roi_pooling_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/roi_pooling_fp32.cc index cbc26b7e..bad21728 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/roi_pooling_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/roi_pooling_fp32.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_ROIPooling; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ROIPoolingCPUKernel::Prepare() { MS_CHECK_TRUE_RET(in_tensors_.size() == kInputSize1, RET_ERROR); MS_CHECK_TRUE_RET(out_tensors_.size() == 1, RET_ERROR); @@ -119,4 +119,4 @@ int ROIPoolingCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ROIPooling, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/roi_pooling_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/roi_pooling_fp32.h index de6af7b0..9f1420e2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/roi_pooling_fp32.h +++ 
b/mindspore-lite/src/litert/kernel/cpu/fp32/roi_pooling_fp32.h @@ -20,7 +20,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/fp32/roi_pooling_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ROIPoolingCPUKernel : public LiteKernel { public: ROIPoolingCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -47,6 +47,6 @@ class ROIPoolingCPUKernel : public LiteKernel { float *max_c_ = nullptr; ROIPoolingParameter *param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_REVERSE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/scatter_nd_update_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/scatter_nd_update_fp32.cc index 62b90086..65b2b030 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/scatter_nd_update_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/scatter_nd_update_fp32.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_ScatterNdUpdate; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { int ScatterNdUpdateRun(void *cdata, int task_id, float, float) { auto kernel = static_cast(cdata); @@ -80,4 +80,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ScatterNdUpdate, LiteKernelCr #ifdef ENABLE_FP16 REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_ScatterNdUpdate, LiteKernelCreator) #endif -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/scatter_nd_update_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/scatter_nd_update_fp32.h index a41b3321..dd3a3d0b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/scatter_nd_update_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/scatter_nd_update_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/base/scatter_nd_binary.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ScatterNdUpdateCPUKernel : public ScatterNDBinaryCPUKernel { public: @@ -32,6 +32,6 @@ class ScatterNdUpdateCPUKernel : public ScatterNDBinaryCPUKernel { int Run() override; int ScatterNdUpdate(int task_id); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_FP32_SCATTER_ND_UPDATE_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/shape_fusion_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/shape_fusion_fp32.cc index c25bb5b4..90f7e143 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/shape_fusion_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/shape_fusion_fp32.cc @@ -25,7 +25,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ShapeFusionCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), kInputSize1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -49,4 +49,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimType_Inner_ShapeFusion, LiteKernelCreat REG_KERNEL(kCPU, kNumberTypeInt8, PrimType_Inner_ShapeFusion, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeUInt8, PrimType_Inner_ShapeFusion, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt64, PrimType_Inner_ShapeFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/shape_fusion_fp32.h 
b/mindspore-lite/src/litert/kernel/cpu/fp32/shape_fusion_fp32.h index be8dcfdc..8aedcbe0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/shape_fusion_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/shape_fusion_fp32.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ShapeFusionCPUKernel : public LiteKernel { public: ShapeFusionCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -32,6 +32,6 @@ class ShapeFusionCPUKernel : public LiteKernel { int ReSize() override; int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_SHAPE_FUSION_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_batch_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_batch_fp32.cc index dc2cd964..9a09fa5a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_batch_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_batch_fp32.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SpaceToBatch; using mindspore::schema::PrimitiveType_SpaceToBatchND; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void SpaceToBatchCPUKernel::ProcessInput() { auto block_shape_data = in_tensors_.at(SECOND_INPUT)->data(); auto block_shape = static_cast(block_shape_data); @@ -103,4 +103,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SpaceToBatchND, LiteKernelCre REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SpaceToBatch, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SpaceToBatchND, LiteKernelCreator) #endif -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_batch_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_batch_fp32.h index 73c99f6f..40e77ba4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_batch_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_batch_fp32.h @@ -21,7 +21,7 @@ #include "nnacl_c/fp32/space_to_batch_fp32.h" #include "nnacl_c/common_func.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SpaceToBatchCPUKernel : public LiteKernel { public: SpaceToBatchCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -43,6 +43,6 @@ class SpaceToBatchCPUKernel : public LiteKernel { void *input_ptr_ = nullptr; void *output_ptr_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_SPACE_TO_BATCH_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_depth_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_depth_fp32.cc index eb574a07..5ff27227 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_depth_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_depth_fp32.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::RET_PARAM_INVALID; using mindspore::schema::PrimitiveType_SpaceToDepth; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SpaceToDepthCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -95,4 +95,4 @@ REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SpaceToDepth, LiteKernelCreat #ifdef ENABLE_FP16 REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SpaceToDepth, LiteKernelCreator) 
#endif -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_depth_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_depth_fp32.h index 2e46b091..d8a24cf1 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_depth_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/space_to_depth_fp32.h @@ -20,7 +20,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/space_to_depth_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SpaceToDepthCPUKernel : public LiteKernel { public: SpaceToDepthCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -38,6 +38,6 @@ class SpaceToDepthCPUKernel : public LiteKernel { private: SpaceToDepthParameter *param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_SPACE_TO_DEPTH_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_fill_empty_rows_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_fill_empty_rows_fp32.cc index 2c96dac3..efdc0775 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_fill_empty_rows_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_fill_empty_rows_fp32.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SparseFillEmptyRows; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { const uint32_t kInput_indices = 0; const uint32_t kInput_values = 1; @@ -198,4 +198,4 @@ int SparseFillEmptyRowsCPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_SparseFillEmptyRows, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SparseFillEmptyRows, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_fill_empty_rows_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_fill_empty_rows_fp32.h index 8c757266..c981f043 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_fill_empty_rows_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_fill_empty_rows_fp32.h @@ -21,7 +21,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SparseFillEmptyRowsCPUKernel : public LiteKernel { public: SparseFillEmptyRowsCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -46,5 +46,5 @@ class SparseFillEmptyRowsCPUKernel : public LiteKernel { int32_t N_ = 0; int32_t rank_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_SPARSE_FILL_EMPTY_ROWS_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_reshape_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_reshape_fp32.cc index 98e637f1..6998dea4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_reshape_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_reshape_fp32.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SparseReshape; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { const uint32_t kInput_indices = 0; const uint32_t kInput_inshape = 1; @@ -186,4 +186,4 @@ int SparseReshapeCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_SparseReshape, 
LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_reshape_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_reshape_fp32.h index c1f84456..cf8b3b91 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_reshape_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_reshape_fp32.h @@ -21,7 +21,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SparseReshapeCPUKernel : public LiteKernel { public: SparseReshapeCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -35,5 +35,5 @@ class SparseReshapeCPUKernel : public LiteKernel { int Run() override; int SoftCopyInputToOutput(lite::Tensor *src_tensor, lite::Tensor *dst_tensor); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_SPARSE_RESHAPE_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_segment_sum_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_segment_sum_fp32.cc index 59491b5b..ddb9b28a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_segment_sum_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_segment_sum_fp32.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SparseSegmentSum; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { const uint32_t kInput_data = 0; const uint32_t kInput_indices = 1; @@ -118,4 +118,4 @@ int SparseSegmentSumCPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_SparseSegmentSum, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SparseSegmentSum, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_segment_sum_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_segment_sum_fp32.h index 448f3f6f..036b9f35 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_segment_sum_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_segment_sum_fp32.h @@ -21,7 +21,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SparseSegmentSumCPUKernel : public LiteKernel { public: SparseSegmentSumCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -34,5 +34,5 @@ class SparseSegmentSumCPUKernel : public LiteKernel { int ReSize() override { return RET_OK; } int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_SPARSE_SEGMENT_SUM_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.cc index 23c2d68d..d54d535f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.cc @@ -30,7 +30,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SparseToDense; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SparseToDenseCPUKernel::Prepare() { MS_CHECK_TRUE_RET(in_tensors_.size() == C4NUM, RET_ERROR); CHECK_NULL_RETURN(in_tensors_[FIRST_INPUT]); // arg: sparse_indices @@ -222,4 +222,4 @@ REG_KERNEL(kCPU, kNumberTypeInt32, 
PrimitiveType_SparseToDense, LiteKernelCreato #ifdef ENABLE_FP16 REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SparseToDense, LiteKernelCreator) #endif -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.h index c4d0d2ce..8666448a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/sparse_to_dense_fp32.h @@ -23,7 +23,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SparseToDenseCPUKernel : public LiteKernel { public: SparseToDenseCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -47,5 +47,5 @@ class SparseToDenseCPUKernel : public LiteKernel { void *sparse_values_ = nullptr; void *default_value_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_SPARSE_TO_DENSE_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/topk_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/topk_fp32.cc index 91267ed7..bfd62f75 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/topk_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/topk_fp32.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_TopKFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int TopKCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -93,4 +93,4 @@ REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_TopKFusion, LiteKernelCreator) #endif -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/topk_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/topk_fp32.h index 27ebdfa1..9814995b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/topk_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/topk_fp32.h @@ -23,7 +23,7 @@ #include "nnacl_c/fp16/topk_fp16.h" #endif -namespace mindspore::kernel { +namespace mindspore::lite::kernel { typedef void (*TopKFunc)(void *input_data, void *output_data, int32_t *output_index, TopkParameter *parameter); class TopKCPUKernel : public LiteKernel { public: @@ -61,6 +61,6 @@ class TopKCPUKernel : public LiteKernel { TopkParameter *topk_param_; TopKFunc topk_func_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_TOPK_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/transpose_server_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/transpose_server_fp32.cc index 18233eea..23a60eb6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/transpose_server_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/transpose_server_fp32.cc @@ -22,7 +22,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Transpose; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int64_t kMinCostPerThread = 1 << 18; } @@ -130,5 +130,5 @@ int TransposeServerCPUKernel::DoTransposeMultiThread(int task_id) { REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Transpose, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Transpose, LiteKernelCreator) -} // 
namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/transpose_server_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/transpose_server_fp32.h index 3793c3fc..b04a61b0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/transpose_server_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/transpose_server_fp32.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/cpu/base/transpose_base.h" #include "nnacl_c/fp32/transpose_server_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class TransposeServerCPUKernel : public TransposeBaseCPUKernel { public: explicit TransposeServerCPUKernel(OpParameter *param, const std::vector &inputs, @@ -41,7 +41,7 @@ class TransposeServerCPUKernel : public TransposeBaseCPUKernel { std::vector strides_; std::vector block_boundary_infos_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_TRANSPOSE_SERVER_FP32_H_ #endif diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/uniform_real_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/uniform_real_fp32.cc index 2318a821..b8df6288 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/uniform_real_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/uniform_real_fp32.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_UniformReal; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr size_t kFirstKeyIndex = 0; constexpr size_t kSecondKeyIndex = 1; @@ -231,4 +231,4 @@ int UniformRealCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_UniformReal, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/uniform_real_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/uniform_real_fp32.h index 790d1e29..c823abd8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/uniform_real_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/uniform_real_fp32.h @@ -20,7 +20,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/random_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class UniformRealCPUKernel : public LiteKernel { public: UniformRealCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -38,6 +38,6 @@ class UniformRealCPUKernel : public LiteKernel { int seed_ = 0; int seed2_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_UNIFORM_REAL_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32/unstack_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32/unstack_fp32.cc index 3360ac18..f105df08 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/unstack_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/unstack_fp32.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Unstack; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int UnstackCPUKernel::Prepare() { MS_CHECK_TRUE_RET(in_tensors_.size() == 1, RET_ERROR); MS_CHECK_TRUE_RET(out_tensors_.size() >= 1, RET_ERROR); @@ -90,4 +90,4 @@ REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Unstack, LiteKernelCreator) #endif -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git 
a/mindspore-lite/src/litert/kernel/cpu/fp32/unstack_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32/unstack_fp32.h index 2e11ac41..588ddfbd 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32/unstack_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32/unstack_fp32.h @@ -20,7 +20,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/base/unstack_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class UnstackCPUKernel : public LiteKernel { public: UnstackCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -35,6 +35,6 @@ class UnstackCPUKernel : public LiteKernel { private: void **output_addr_array_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_UNSTACK_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/activation_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/activation_grad.cc index 15f6ac92..d75030fd 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/activation_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/activation_grad.cc @@ -32,7 +32,7 @@ using mindspore::schema::ActivationType_RELU; using mindspore::schema::ActivationType_RELU6; using mindspore::schema::PrimitiveType_ActivationGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ActivationGradCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C2NUM); CHECK_LESS_RETURN(out_tensors_.size(), C1NUM); @@ -114,4 +114,4 @@ int ActivationGradCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ActivationGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/activation_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/activation_grad.h index 1e0e79cc..85161d04 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/activation_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/activation_grad.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/fp32/activation_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ActivationGradCPUKernel : public LiteKernel { public: explicit ActivationGradCPUKernel(OpParameter *param, const std::vector &inputs, @@ -40,6 +40,6 @@ class ActivationGradCPUKernel : public LiteKernel { ActivationParameter *param_act_grad_; int thread_count_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_ACTIVATION_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/adam.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/adam.cc index 1a783e93..2f63a2a3 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/adam.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/adam.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Adam; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr static int kBeta1PowerIdx = 3; constexpr static int kBeta2PowerIdx = 4; constexpr static int kBeta1Idx = 6; @@ -159,4 +159,4 @@ kernel::LiteKernel *CpuAdamFp32KernelCreator(const std::vector & } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Adam, CpuAdamFp32KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/adam.h 
b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/adam.h index dfcc8835..8ad493ff 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/adam.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/adam.h @@ -21,7 +21,7 @@ #include "src/train/optimizer_kernel.h" #include "nnacl_c/fp32_grad/optimizer.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr int kAdamLrIndex = 5; constexpr int kAdamGradIndex = 9; @@ -51,6 +51,6 @@ class AdamCPUKernel : public OptimizerKernel { int thread_count_; AdamParameter *adam_param_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_ADAM_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/adam_weight_decay.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/adam_weight_decay.cc index 5c30fb8d..8e1308dc 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/adam_weight_decay.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/adam_weight_decay.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_AdamWeightDecay; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr static int kBeta1Idx = 4; constexpr static int kBeta2Idx = 5; @@ -146,4 +146,4 @@ kernel::LiteKernel *CpuAdamWeightDecayFp32KernelCreator(const std::vector #include "src/train/optimizer_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr static int kLrIdx = 3; constexpr static int kGradientIdx = 8; @@ -45,6 +45,6 @@ class AdamWeightDecayCPUKernel : public OptimizerKernel { private: int thread_count_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_ADAM_WEIGHT_DECAY_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/apply_momentum.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/apply_momentum.cc index 7bccec6e..7fd4b10b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/apply_momentum.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/apply_momentum.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_ApplyMomentum; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ApplyMomentumCPUKernel::ReSize() { return RET_OK; } static int DoApplyMomentum(float *weight, float *accumulate, float learning_rate, const float *gradient, float moment, @@ -158,4 +158,4 @@ kernel::LiteKernel *CpuApplyMomentumFp32KernelCreator(const std::vector } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Assign, CpuAssignFp32KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/assign.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/assign.h index 9b139115..a72200fc 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/assign.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/assign.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/fp32_grad/optimizer.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class AssignCPUKernel : public LiteKernel { public: explicit AssignCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -36,6 +36,6 @@ class AssignCPUKernel : public LiteKernel { protected: int thread_count_ = 1; }; -} // namespace mindspore::kernel +} // namespace 
mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_ASSIGN_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/bias_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/bias_grad.cc index 0c9c666e..72953068 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/bias_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/bias_grad.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_BiasAddGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int BiasGradCPUKernel::ReSize() { auto dims = in_tensors_[0]->shape(); bias_param->ndim_ = dims.size(); @@ -111,4 +111,4 @@ kernel::LiteKernel *CpuBiasGradFp32KernelCreator(const std::vector &inputs, @@ -39,6 +39,6 @@ class BiasGradCPUKernel : public LiteKernel { private: ArithmeticParameter *bias_param; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_BIAS_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/binary_cross_entropy.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/binary_cross_entropy.cc index d0376e14..81dba5aa 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/binary_cross_entropy.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/binary_cross_entropy.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_BinaryCrossEntropy; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { BinaryCrossEntropyCPUKernel::~BinaryCrossEntropyCPUKernel() { if (tmp_loss_ != nullptr) { free(tmp_loss_); @@ -116,4 +116,4 @@ kernel::LiteKernel *CpuBinaryCrossEntropyFp32KernelCreator(const std::vector #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class BinaryCrossEntropyCPUKernel : public LiteKernel { public: explicit BinaryCrossEntropyCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -37,6 +37,6 @@ class BinaryCrossEntropyCPUKernel : public LiteKernel { bool weight_defined_{false}; float *weight_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_BINARY_CROSS_ENTROPY_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/binary_cross_entropy_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/binary_cross_entropy_grad.cc index 36b3676c..45a64dcb 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/binary_cross_entropy_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/binary_cross_entropy_grad.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_BinaryCrossEntropyGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int BinaryCrossEntropyGradCPUKernel::ReSize() { CHECK_LESS_RETURN(in_tensors_.size(), C3NUM); CHECK_LESS_RETURN(out_tensors_.size(), C1NUM); @@ -101,4 +101,4 @@ kernel::LiteKernel *CpuBinaryCrossEntropyGradFp32KernelCreator(const std::vector } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BinaryCrossEntropyGrad, CpuBinaryCrossEntropyGradFp32KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/binary_cross_entropy_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/binary_cross_entropy_grad.h index 0a088dec..9bb70185 
100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/binary_cross_entropy_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/binary_cross_entropy_grad.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class BinaryCrossEntropyGradCPUKernel : public LiteKernel { public: explicit BinaryCrossEntropyGradCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -36,6 +36,6 @@ class BinaryCrossEntropyGradCPUKernel : public LiteKernel { bool weight_defined_{false}; float *weight_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_BINARY_CROSS_ENTROPY_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/bn_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/bn_grad.cc index ea84f8db..6c270667 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/bn_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/bn_grad.cc @@ -32,7 +32,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_BatchNormGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kMaxTaskNum = 3; constexpr int kNumInputDim2 = 2; @@ -199,4 +199,4 @@ kernel::LiteKernel *CpuBNGradFp32KernelCreator(const std::vector } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BatchNormGrad, CpuBNGradFp32KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/bn_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/bn_grad.h index 8342ca80..28058396 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/bn_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/bn_grad.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class BNGradCPUKernel : public LiteKernel { public: @@ -36,5 +36,5 @@ class BNGradCPUKernel : public LiteKernel { private: int stage_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_BN_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution.cc index 0639cec2..16613f24 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution.cc @@ -24,7 +24,7 @@ using mindspore::kernel::KERNEL_ARCH; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ConvolutionTrainCPUKernel::ReSize() { if (in_tensors_.size() < 2) { MS_LOG(ERROR) << "Convolution should have at least two inputs"; @@ -185,4 +185,4 @@ kernel::LiteKernel *CpuConvTrainFp32KernelCreator(const std::vector #include "src/executor/kernel_exec.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionTrainCPUKernel : public LiteKernel { public: explicit ConvolutionTrainCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -47,6 +47,6 @@ class ConvolutionTrainCPUKernel : public LiteKernel { kernel::LiteKernel *CpuConvTrainFp32KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *opParameter, const lite::InnerContext *ctx, const kernel::KernelKey &desc); -} // namespace 
mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_CONVOLUTION_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution_grad_filter.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution_grad_filter.cc index 35fc941a..7d51b7bc 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution_grad_filter.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution_grad_filter.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Conv2DBackpropFilterFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr static int kDyIdx = 0; constexpr static int kXIdx = 1; constexpr static int kDwIdx = 0; @@ -226,4 +226,4 @@ kernel::LiteKernel *CpuConvGradFilterFp32KernelCreator(const std::vector #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionGradFilterCPUKernel : public LiteKernel { public: explicit ConvolutionGradFilterCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -46,6 +46,6 @@ class ConvolutionGradFilterCPUKernel : public LiteKernel { const int chunk_ = C12NUM * 2; #endif }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_CONVOLUTION_GRAD_FILTER_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution_grad_input.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution_grad_input.cc index 515ce2ec..4ba63d36 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution_grad_input.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution_grad_input.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Conv2DBackpropInputFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ConvolutionGradInputCPUKernel::ReSize() { auto *dy_tensor = in_tensors_.at(kInputIndex); MS_ASSERT(dy_tensor != nullptr); @@ -188,4 +188,4 @@ int ConvolutionGradInputCPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Conv2DBackpropInputFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution_grad_input.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution_grad_input.h index 5ab29a8e..3183b5dc 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution_grad_input.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/convolution_grad_input.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionGradInputCPUKernel : public LiteKernel { public: explicit ConvolutionGradInputCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -44,6 +44,6 @@ class ConvolutionGradInputCPUKernel : public LiteKernel { const int chunk_ = C12NUM; #endif }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_CONVOLUTION_GRAD_INPUT_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/deconvolution_grad_filter.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/deconvolution_grad_filter.cc index 94ced4c2..bfa37aca 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/deconvolution_grad_filter.cc +++ 
b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/deconvolution_grad_filter.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_DeConv2DGradFilter; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int DeConvolutionGradFilterCPUKernel::Prepare() { // dy is in input 0 // x is in input 1 @@ -158,4 +158,4 @@ kernel::LiteKernel *CpuDeConvGradFilterFp32KernelCreator(const std::vector #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DeConvolutionGradFilterCPUKernel : public LiteKernel { public: explicit DeConvolutionGradFilterCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -37,6 +37,6 @@ class DeConvolutionGradFilterCPUKernel : public LiteKernel { size_t ws_size = 0; const int chunk = 1; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_DECONVOLUTION_GRAD_FILTER_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout.cc index aef8c820..2870b309 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Dropout; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr float kOne = 1.0f; int DropoutCPUKernel::Prepare() { @@ -129,4 +129,4 @@ kernel::LiteKernel *CpuDropoutFp32KernelCreator(const std::vector #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DropoutCPUKernel : public LiteKernel { public: DropoutCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -41,6 +41,6 @@ class DropoutCPUKernel : public LiteKernel { std::bernoulli_distribution distribution_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_DROPOUT_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout_grad.cc index f057ba4f..41645be4 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/dropout_grad.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_DropoutGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr static int kInputIdx = 0; constexpr static int kMaskIdx = 1; constexpr static int kOutputIdx = 0; @@ -123,4 +123,4 @@ kernel::LiteKernel *CpuDropoutGradFp32KernelCreator(const std::vector #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DropoutGradCPUKernel : public LiteKernel { public: DropoutGradCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -38,6 +38,6 @@ class DropoutGradCPUKernel : public LiteKernel { int thread_count_ = 1; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_DROPOUT_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/layernorm_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/layernorm_grad.cc index 4edbed4a..757b2a3c 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/layernorm_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/layernorm_grad.cc @@ -30,7 +30,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_LayerNormGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int LayerNormGradCPUKernel::ReSize() { return RET_OK; } int LayerNormGradCPUKernel::Prepare() { @@ -129,4 +129,4 @@ int LayerNormGradCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LayerNormGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/layernorm_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/layernorm_grad.h index 4aba8435..32cc2406 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/layernorm_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/layernorm_grad.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class LayerNormGradCPUKernel : public LiteKernel { public: @@ -39,5 +39,5 @@ class LayerNormGradCPUKernel : public LiteKernel { int param_num_ = 1; int param_size_ = 1; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_LAYERNORM_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_data_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_data_fp32.cc index 1d783c5d..718c0b69 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_data_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_data_fp32.cc @@ -22,7 +22,7 @@ #include "src/litert/kernel_registry.h" #include "nnacl_c/fp32/lstm_fp32.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; @@ -283,4 +283,4 @@ void LSTMGradDataCPUKernel::FreeRunBuffer() { REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LSTMGradData, LiteKernelCreator) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_data_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_data_fp32.h index 6f1f62b0..b6743f8d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_data_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_data_fp32.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/fp32_grad/lstm_grad_fp32.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class LSTMGradDataCPUKernel : public LiteKernel { public: @@ -82,6 +82,6 @@ class LSTMGradDataCPUKernel : public LiteKernel { LstmGradParameter *lstm_param_ = nullptr; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_LSTM_GRAD_DATA_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_fp32.cc index a8edc072..20f64d48 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_fp32.cc @@ -22,7 +22,7 @@ #include "src/litert/kernel_registry.h" #include "nnacl_c/fp32/lstm_fp32.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { 
using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; @@ -298,4 +298,4 @@ void LSTMGradCPUKernel::FreeRunBuffer() { REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LSTMGrad, LiteKernelCreator) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_fp32.h index bca70e2d..15549af1 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_fp32.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/fp32_grad/lstm_grad_fp32.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class LSTMGradCPUKernel : public LiteKernel { public: @@ -88,6 +88,6 @@ class LSTMGradCPUKernel : public LiteKernel { LstmGradParameter *lstm_param_ = nullptr; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_LSTM_GRAD_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_weight_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_weight_fp32.cc index 4b064da1..55c8bdcc 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_weight_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_weight_fp32.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel_registry.h" #include "nnacl_c/fp32/lstm_fp32.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; @@ -245,4 +245,4 @@ void LSTMGradWeightCPUKernel::FreeRunBuffer() { REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LSTMGradWeight, LiteKernelCreator) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_weight_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_weight_fp32.h index 5db3f80e..f376e0ab 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_weight_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/lstm_grad_weight_fp32.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/fp32_grad/lstm_grad_fp32.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class LSTMGradWeightCPUKernel : public LiteKernel { public: @@ -71,6 +71,6 @@ class LSTMGradWeightCPUKernel : public LiteKernel { LstmGradParameter *lstm_param_ = nullptr; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_LSTM_GRAD_WEIGHT_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/make_tuple.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/make_tuple.h index 190907e1..1468926d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/make_tuple.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/make_tuple.h @@ -22,7 +22,7 @@ #include "src/litert/kernel/cpu/nnacl/fp32/arithmetic.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MakeTupleCPUKernel : public LiteKernel { public: explicit MakeTupleCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -40,6 +40,6 @@ class MakeTupleCPUKernel : public LiteKernel { private: OpParameter *param; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // 
MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_MAKE_TUPLE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/neg_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/neg_grad.cc index 916d7e40..9ef2db4f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/neg_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/neg_grad.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_NegGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { int NegGradRun(void *cdata, int task_id, float lhs_scale, float rhs_scale) { CHECK_NULL_RETURN(cdata); @@ -94,4 +94,4 @@ kernel::LiteKernel *CpuNegGradFp32KernelCreator(const std::vector) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/nllloss_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/nllloss_grad.h index 5b585728..0718a1f2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/nllloss_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/nllloss_grad.h @@ -22,7 +22,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/nllloss_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class NLLLossGradCPUKernel : public LiteKernel { public: NLLLossGradCPUKernel(OpParameter *param, const std::vector &inputs, @@ -41,6 +41,6 @@ class NLLLossGradCPUKernel : public LiteKernel { int class_num_{0}; NLLLossParameter *nllloss_param_{nullptr}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_NLLLOSS_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/pooling_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/pooling_grad.cc index f79c2401..593f2ce2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/pooling_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/pooling_grad.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_AvgPoolGrad; using mindspore::schema::PrimitiveType_MaxPoolGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kNumShapeDim2 = 1; constexpr int kNumShapeDim3 = 2; @@ -140,4 +140,4 @@ kernel::LiteKernel *CpuPoolingGradFp32KernelCreator(const std::vector &inputs, @@ -47,6 +47,6 @@ class PowerGradCPUKernel : public LiteKernel { float shift_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_POWER_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/resize_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/resize_grad.cc index 43ec0469..585a739d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/resize_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/resize_grad.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_ResizeGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { float ResizeGradCPUKernel::Scaling(const size_t in_size, const size_t out_size, const bool align_corners) { return (align_corners && out_size > 1) ? 
(in_size - 1) / (static_cast(out_size - 1)) : in_size / (static_cast(out_size)); @@ -112,4 +112,4 @@ int ResizeGradCPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ResizeGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/resize_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/resize_grad.h index 9a6ea8e4..491d6a84 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/resize_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/resize_grad.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ResizeGradCPUKernel : public LiteKernel { public: explicit ResizeGradCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -36,6 +36,6 @@ class ResizeGradCPUKernel : public LiteKernel { private: float Scaling(size_t in_size, size_t out_size, bool align_corners); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_RESIZE_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sgd.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sgd.cc index 43db06ad..6c3349a0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sgd.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sgd.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SGD; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SgdCPUKernel::ReSize() { return RET_OK; } int DoSgd(float *weight, float *accumulate, float *gradient, float learning_rate, float dampening, float moment, @@ -268,4 +268,4 @@ kernel::LiteKernel *CpuSgdFp32KernelCreator(const std::vector &i } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SGD, CpuSgdFp32KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sgd.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sgd.h index 2b8758b0..53dda6ac 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sgd.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sgd.h @@ -22,7 +22,7 @@ #include "src/train/optimizer_kernel.h" #include "nnacl_c/fp32_grad/optimizer.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr int kSgdLrIndex = 2; constexpr int kSgdGradIndex = 1; @@ -55,6 +55,6 @@ class SgdCPUKernel : public OptimizerKernel { SgdParameter *sgd_param_; std::atomic sgd_stat_{0.0f}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_SGD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits.cc index c751a8a0..6a62ecef 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SigmoidCrossEntropyWithLogits; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SigmoidCrossEntropyWithLogitsCPUKernel::ReSize() { CHECK_NULL_RETURN(op_parameter_); CHECK_LESS_RETURN(in_tensors_.size(), 2); @@ -97,4 +97,4 @@ 
kernel::LiteKernel *CpuSigmoidCrossEntropyWithLogitsFp32KernelCreator(const std: REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SigmoidCrossEntropyWithLogits, CpuSigmoidCrossEntropyWithLogitsFp32KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits.h index 04a29b57..4d47514e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SigmoidCrossEntropyWithLogitsCPUKernel : public LiteKernel { public: explicit SigmoidCrossEntropyWithLogitsCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -33,6 +33,6 @@ class SigmoidCrossEntropyWithLogitsCPUKernel : public LiteKernel { int Run() override; int DoExecute(int task_id); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc index 7a122f86..50db2a4c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SigmoidCrossEntropyWithLogitsGradCPUKernel::ReSize() { CHECK_NULL_RETURN(op_parameter_); CHECK_LESS_RETURN(in_tensors_.size(), 3); @@ -100,4 +100,4 @@ kernel::LiteKernel *CpuSigmoidCrossEntropyWithLogitsGradFp32KernelCreator(const REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SigmoidCrossEntropyWithLogitsGrad, CpuSigmoidCrossEntropyWithLogitsGradFp32KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits_grad.h index 368457eb..d856957e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sigmoid_cross_entropy_with_logits_grad.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SigmoidCrossEntropyWithLogitsGradCPUKernel : public LiteKernel { public: explicit SigmoidCrossEntropyWithLogitsGradCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -33,6 +33,6 @@ class SigmoidCrossEntropyWithLogitsGradCPUKernel : public LiteKernel { int Run() override; int DoExecute(int task_id); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/smooth_l1_loss.cc 
b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/smooth_l1_loss.cc index 487778cf..c4155d81 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/smooth_l1_loss.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/smooth_l1_loss.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SmoothL1Loss; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr static int kPredictIdx = 0; constexpr static int kTargetIdx = 1; constexpr static int kOutputIdx = 0; @@ -116,4 +116,4 @@ kernel::LiteKernel *CpuSmoothL1LossFp32KernelCreator(const std::vector &inputs, @@ -39,6 +39,6 @@ class SmoothL1LossCPUKernel : public LiteKernel { SmoothL1LossParameter *smooth_l1_param_; size_t thread_count_ = 1; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_SMOOTH_L1_LOSS_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/smooth_l1_loss_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/smooth_l1_loss_grad.cc index 2b5de966..430499ef 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/smooth_l1_loss_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/smooth_l1_loss_grad.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SmoothL1LossGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr static int kPredictIdx = 0; constexpr static int kTargetIdx = 1; constexpr static int kDlossIdx = 2; @@ -112,4 +112,4 @@ kernel::LiteKernel *CpuSmoothL1LossGradFp32KernelCreator(const std::vector &inputs, @@ -39,6 +39,6 @@ class SmoothL1LossGradCPUKernel : public LiteKernel { SmoothL1LossParameter *smooth_l1_param_; int thread_count_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_SMOOTH_L1_LOSS_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/softmax_cross_entropy_with_logits.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/softmax_cross_entropy_with_logits.cc index 67551d30..b166565c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/softmax_cross_entropy_with_logits.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/softmax_cross_entropy_with_logits.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SoftmaxCrossEntropyWithLogits; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SoftmaxCrossEntropyWithLogitsCPUKernel::Prepare() { return ReSize(); } int SoftmaxCrossEntropyWithLogitsCPUKernel::DoExecute(int task_id) { @@ -122,4 +122,4 @@ kernel::LiteKernel *CpuSoftmaxCrossEntropyFp32KernelCreator(const std::vector
  • (op_parameter_); auto in_shape = in_tensors_.at(0)->shape(); @@ -105,4 +105,4 @@ kernel::LiteKernel *CpuSoftmaxGradFp32KernelCreator(const std::vector &inputs, @@ -43,6 +43,6 @@ class SoftmaxGradCPUKernel : public LiteKernel { size_t inner_size_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_SOFTMAX_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc index 200c0e14..9fd9e940 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SparseSoftmaxCrossEntropyWithLogits; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::ReSize() { return Prepare(); } int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::ForwardPostExecute(const int *labels, const float *losses, @@ -203,4 +203,4 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Prepare() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SparseSoftmaxCrossEntropyWithLogits, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sparse_softmax_cross_entropy_with_logits.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sparse_softmax_cross_entropy_with_logits.h index 12730a04..8792db27 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sparse_softmax_cross_entropy_with_logits.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/sparse_softmax_cross_entropy_with_logits.h @@ -23,7 +23,7 @@ #include "nnacl_c/fp32/arithmetic_fp32.h" #include "nnacl_c/softmax_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SparseSoftmaxCrossEntropyWithLogitsCPUKernel : public LossKernel { public: @@ -61,6 +61,6 @@ class SparseSoftmaxCrossEntropyWithLogitsCPUKernel : public LossKernel { int threads_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_SPARSE_SOFTMAX_CROSS_ENTROPY_WITH_LOGITS_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/strided_slice_grad.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/strided_slice_grad.cc index 4e1d2dbe..b793d1e8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/strided_slice_grad.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/strided_slice_grad.cc @@ -30,7 +30,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_StridedSliceGrad; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int StridedSliceGradCPUKernel::Prepare() { if (!InferShapeDone()) { return RET_OK; @@ -225,4 +225,4 @@ void StridedSliceGradCPUKernel::FreeRunBuffer() { } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_StridedSliceGrad, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/strided_slice_grad.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/strided_slice_grad.h index 2beef6a2..1ada43b7 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/strided_slice_grad.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/strided_slice_grad.h @@ -21,7 +21,7 @@ #include "nnacl_c/strided_slice_parameter.h" #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class StridedSliceGradCPUKernel : public LiteKernel { public: StridedSliceGradCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -49,6 +49,6 @@ class StridedSliceGradCPUKernel : public LiteKernel { float *temp_input_ = nullptr; float *temp_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_STRIDED_SLICE_GRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/unsorted_segment_sum.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/unsorted_segment_sum.cc index ae714355..6324a0ae 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/unsorted_segment_sum.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/unsorted_segment_sum.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_UnsortedSegmentSum; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int UnsortedSegmentSumCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 2); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -112,4 +112,4 @@ int UnsortedSegmentSumCPUKernel::DoExecute(int task_id) { REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_UnsortedSegmentSum, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_UnsortedSegmentSum, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/unsorted_segment_sum.h b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/unsorted_segment_sum.h index 7bac2af1..f0281669 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_grad/unsorted_segment_sum.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_grad/unsorted_segment_sum.h @@ -20,7 +20,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class UnsortedSegmentSumCPUKernel : public LiteKernel { public: UnsortedSegmentSumCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -39,6 +39,6 @@ class UnsortedSegmentSumCPUKernel : public LiteKernel { size_t output_dim0_ = 0; size_t output_dim1_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_GRAD_UNSORTED_SEGMENT_SUM_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/fp32_sparse/matmul_sparse_fp32.cc b/mindspore-lite/src/litert/kernel/cpu/fp32_sparse/matmul_sparse_fp32.cc index 220901e8..25c69824 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_sparse/matmul_sparse_fp32.cc +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_sparse/matmul_sparse_fp32.cc @@ -32,7 +32,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_MatMulFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr size_t kFirstDimIdx = 0; constexpr size_t kSecondDimIdx = 1; constexpr size_t kThirdDimIdx = 2; @@ -336,4 +336,4 @@ MatmulSparseCPUKernel::~MatmulSparseCPUKernel() { free(sparsity_weight_->data); delete (this->sparsity_weight_); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git 
a/mindspore-lite/src/litert/kernel/cpu/fp32_sparse/matmul_sparse_fp32.h b/mindspore-lite/src/litert/kernel/cpu/fp32_sparse/matmul_sparse_fp32.h index 1e6c94b6..29a1eb7b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/fp32_sparse/matmul_sparse_fp32.h +++ b/mindspore-lite/src/litert/kernel/cpu/fp32_sparse/matmul_sparse_fp32.h @@ -22,7 +22,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/fp32/transpose_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { struct SparsityWeight { uint32_t nnz; float *data; @@ -58,5 +58,5 @@ class MatmulSparseCPUKernel : public LiteKernel { size_t matrix_a_pack_size_ = 0; float *bias_pack_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_FP32_SPARSE_MATMUL_SPARSE_FP32_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/activation_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/activation_int8.cc index 9bc410e7..6d82ed31 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/activation_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/activation_int8.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Activation; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { kernel::LiteKernel *CpuActivationInt8KernelCreator(const std::vector &inputs, const std::vector &outputs, OpParameter *parameter, const lite::InnerContext *ctx, const KernelKey &desc) { @@ -71,4 +71,4 @@ kernel::LiteKernel *CpuActivationInt8KernelCreator(const std::vector) REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_AddFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/add_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/add_int8.h index 586184fc..e4d1f5d6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/add_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/add_int8.h @@ -23,7 +23,7 @@ #include "nnacl_c/int8/add_int8.h" #include "nnacl_c/arithmetic_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class QuantizedAddCPUKernel : public LiteKernel { public: explicit QuantizedAddCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -55,6 +55,6 @@ class QuantizedAddCPUKernel : public LiteKernel { }; int AddInt8Run(void *cdata, int task_id, float lhs_scale, float rhs_scale); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_ADD_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/argminmax_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/argminmax_int8.cc index bb21bebd..b8a7d3a7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/argminmax_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/argminmax_int8.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_PARAM_INVALID; using mindspore::schema::PrimitiveType_ArgMaxFusion; using mindspore::schema::PrimitiveType_ArgMinFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { ArgMinMaxInt8CPUKernel::~ArgMinMaxInt8CPUKernel() { if (in_quant_arg_ != nullptr) { free(in_quant_arg_); @@ -159,4 +159,4 @@ int ArgMinMaxInt8CPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ArgMaxFusion, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ArgMinFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel 
diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/argminmax_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/argminmax_int8.h index 4eb95ab0..202035e8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/argminmax_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/argminmax_int8.h @@ -24,7 +24,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/kernel/arg_min_max.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ArgMinMaxInt8CPUKernel : public LiteKernel { public: ArgMinMaxInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -42,6 +42,6 @@ class ArgMinMaxInt8CPUKernel : public LiteKernel { QuantArg *out_quant_arg_ = nullptr; ArgMinMaxComputeParam *compute_param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_ARGMINMAX_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_int8.cc index ed066209..368af835 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_int8.cc @@ -38,7 +38,7 @@ using mindspore::schema::PrimitiveType_LessEqual; using mindspore::schema::PrimitiveType_MulFusion; using mindspore::schema::PrimitiveType_NotEqual; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { int ArithmeticsInt8Launch(void *cdata, int task_id, float, float) { auto arithmetic_kernel = reinterpret_cast(cdata); @@ -219,4 +219,4 @@ REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_LessEqual, CpuArithmeticInt8Kern REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Greater, CpuArithmeticInt8KernelCreator) REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_GreaterEqual, CpuArithmeticInt8KernelCreator) REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Eltwise, CpuArithmeticInt8KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_int8.h index 3c60c6fd..c6964e32 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_int8.h @@ -22,7 +22,7 @@ #include "schema/model_generated.h" #include "nnacl_c/int8/arithmetic_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ArithmeticInt8CPUKernel : public LiteKernel { typedef int (*ArithmeticRunInt8)(int8_t *input0, int8_t *input1, uint8_t *output, int element_size, ArithmeticQuantArg *quant_arg); @@ -44,5 +44,5 @@ class ArithmeticInt8CPUKernel : public LiteKernel { ArithmeticRunInt8 arithmetic_run_{nullptr}; ArithmeticQuantArg quant_args_ = {}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_ARITHMETIC_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_self_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_self_int8.cc index 3697b6e0..21988286 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_self_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_self_int8.cc @@ -25,7 +25,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ArithmeticSelfInt8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); 
CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -136,4 +136,4 @@ REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Rsqrt, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_LogicalNot, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Reciprocal, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_self_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_self_int8.h index 8930fe32..b0a84930 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_self_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/arithmetic_self_int8.h @@ -37,7 +37,7 @@ using mindspore::schema::PrimitiveType_Sin; using mindspore::schema::PrimitiveType_Sqrt; using mindspore::schema::PrimitiveType_Square; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ArithmeticSelfInt8CPUKernel : public LiteKernel { typedef int (*ArithmeticSelfInt8Run)(const int8_t *input, int8_t *output, int element_size, ArithSelfQuantArg para); @@ -104,6 +104,6 @@ class ArithmeticSelfInt8CPUKernel : public LiteKernel { int8_t *in_ptr_{nullptr}; int8_t *out_ptr_{nullptr}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_ARITHMETIC_SELF_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/batch_to_space_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/batch_to_space_int8.cc index b6560a89..8897b3c5 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/batch_to_space_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/batch_to_space_int8.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_BatchToSpace; using mindspore::schema::PrimitiveType_BatchToSpaceND; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { BatchToSpaceInt8CPUKernel::~BatchToSpaceInt8CPUKernel() { if (in_quant_arg_ != nullptr) { free(in_quant_arg_); @@ -159,4 +159,4 @@ int BatchToSpaceInt8CPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_BatchToSpace, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_BatchToSpaceND, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/batch_to_space_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/batch_to_space_int8.h index 9cd263d8..c48e0ea2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/batch_to_space_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/batch_to_space_int8.h @@ -23,7 +23,7 @@ #include "nnacl_c/int8/batch_to_space_int8.h" #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class BatchToSpaceInt8CPUKernel : public LiteKernel { public: BatchToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -44,6 +44,6 @@ class BatchToSpaceInt8CPUKernel : public LiteKernel { int32_t crops_[COMM_SHAPE_SIZE] = {0}; bool no_crop_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_BATCH_TO_SPACE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/batchnorm_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/batchnorm_int8.cc index 10ed8e28..fc86917a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/batchnorm_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/batchnorm_int8.cc @@ -35,7 +35,7 @@ constexpr 
int kNumInput2 = 2; constexpr int kNumInput3 = 3; constexpr int kNumInput4 = 4; } // namespace -namespace mindspore::kernel { +namespace mindspore::lite::kernel { BatchnormInt8CPUKernel::~BatchnormInt8CPUKernel() { if (alpha_addr_ != nullptr) { free(alpha_addr_); @@ -260,4 +260,4 @@ int BatchnormInt8CPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_BatchNorm, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_FusedBatchNorm, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/batchnorm_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/batchnorm_int8.h index 3312cd0f..fcf9c98a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/batchnorm_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/batchnorm_int8.h @@ -24,7 +24,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class BatchnormInt8CPUKernel : public LiteKernel { public: BatchnormInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -48,6 +48,6 @@ class BatchnormInt8CPUKernel : public LiteKernel { int unit_ = 0; int units_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_BATCHNORM_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/concat_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/concat_int8.cc index 77be9220..96341053 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/concat_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/concat_int8.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Concat; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ConcatInt8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_NULL_RETURN(in_tensors_.front()); @@ -144,4 +144,4 @@ void ConcatInt8CPUKernel::DoExecute(int task_id) { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Concat, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/concat_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/concat_int8.h index 89d691d9..aa74294e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/concat_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/concat_int8.h @@ -24,7 +24,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/kernel/concat.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConcatInt8CPUKernel : public LiteKernel { public: ConcatInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -65,6 +65,6 @@ class ConcatInt8CPUKernel : public LiteKernel { }; int ConcatInt8Run(void *cdata, int task_id, float lhs_scale, float rhs_scale); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_CONCAT_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_1x1_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_1x1_int8.cc index c651510d..21d7aea0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_1x1_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_1x1_int8.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { 
Convolution1x1Int8CPUKernel::~Convolution1x1Int8CPUKernel() { if (matmul_param_ != nullptr) { delete matmul_param_; @@ -619,4 +619,4 @@ int Convolution1x1Int8CPUKernel::Run() { FreeRunBuf(); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_1x1_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_1x1_int8.h index 0d009cbf..c880fcbc 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_1x1_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_1x1_int8.h @@ -28,7 +28,7 @@ #include "nnacl_c/matmul_parameter.h" #include "src/common/utils.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class Convolution1x1Int8CPUKernel : public ConvolutionBaseCPUKernel { public: Convolution1x1Int8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -86,6 +86,6 @@ class Convolution1x1Int8CPUKernel : public ConvolutionBaseCPUKernel { bool support_optimize_ = false; bool filter_peroc_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_CONVOLUTION_1X1_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_3x3_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_3x3_int8.cc index e408695f..9a4c0cbf 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_3x3_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_3x3_int8.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr size_t kUnitBufferMultipler = 4 * 4; } // namespace @@ -273,4 +273,4 @@ int Convolution3x3Int8CPUKernel::Run() { FreeTmpBuffer(); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_3x3_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_3x3_int8.h index dd9621bd..65f7d3a8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_3x3_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_3x3_int8.h @@ -22,7 +22,7 @@ #include "nnacl_c/fp32/winograd_transform.h" #include "src/litert/kernel/cpu/base/convolution_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class Convolution3x3Int8CPUKernel : public ConvolutionBaseCPUKernel { public: Convolution3x3Int8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -47,6 +47,6 @@ class Convolution3x3Int8CPUKernel : public ConvolutionBaseCPUKernel { int8_t *tmp_out_ = nullptr; }; int ProcessFilterUint8(const int8_t *origin_weight, int16_t *dst_weight, const ConvParameter *conv_param); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_CONVOLUTION_3X3_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_3x3_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_3x3_int8.cc index 66229b09..48356c18 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_3x3_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_3x3_int8.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kConvDepthwise3x3BufferSize = 64 * 10 * 
10; constexpr int kChannelUnit = 8; @@ -218,4 +218,4 @@ int ConvolutionDepthwise3x3Int8CPUKernel::Run() { ms_context_->allocator->Free(buffer_); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_3x3_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_3x3_int8.h index 12a8f3f9..91c1fdc1 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_3x3_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_3x3_int8.h @@ -22,7 +22,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl_c/fp32/conv_depthwise_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDepthwise3x3Int8CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwise3x3Int8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -45,6 +45,6 @@ class ConvolutionDepthwise3x3Int8CPUKernel : public ConvolutionBaseCPUKernel { int8_t *output_ptr_ = nullptr; int8_t *buffer_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_CONVOLUTION_DEPTHWISE_3X3_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_int8.cc index 47385392..f044cd92 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_int8.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { ConvolutionDepthwiseInt8CPUKernel::~ConvolutionDepthwiseInt8CPUKernel() { if (packed_weight_sub_ != nullptr) { free(packed_weight_sub_); @@ -194,4 +194,4 @@ int ConvolutionDepthwiseInt8CPUKernel::Run() { row_buffer_ = nullptr; return ret; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_int8.h index 0164c0f2..d18e3fac 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_int8.h @@ -22,7 +22,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl_c/fp32/conv_depthwise_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDepthwiseInt8CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -44,6 +44,6 @@ class ConvolutionDepthwiseInt8CPUKernel : public ConvolutionBaseCPUKernel { int8_t *output_ptr_ = nullptr; int32_t *row_buffer_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_CONVOLUTION_DEPTHWISE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_slidewindow_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_slidewindow_int8.cc index d1b772d5..a6963bcb 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_slidewindow_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_slidewindow_int8.cc @@ -21,7 +21,7 @@ using 
mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { ConvolutionDepthwiseSWInt8CPUKernel::~ConvolutionDepthwiseSWInt8CPUKernel() { if (sliding_ != nullptr) { delete sliding_; @@ -368,4 +368,4 @@ void ConvolutionDepthwiseSWInt8CPUKernel::FreePackedInputOutput() { packed_output_ = nullptr; } } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_slidewindow_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_slidewindow_int8.h index 61d27ac7..c94d505b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_slidewindow_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_depthwise_slidewindow_int8.h @@ -23,7 +23,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl_c/fp32/conv_depthwise_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionDepthwiseSWInt8CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseSWInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -57,6 +57,6 @@ class ConvolutionDepthwiseSWInt8CPUKernel : public ConvolutionBaseCPUKernel { int32_t *output_zp_ = nullptr; float *output_scale_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_CONVOLUTION_DEPTHWISE_SLIDEWINDOW_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8.cc index 2d6b7fe2..a9d14a89 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void ConvolutionInt8CPUKernel::CheckSupportOptimize() { tile_num_ = 8; #ifdef ENABLE_ARM32 @@ -261,4 +261,4 @@ int ConvolutionInt8CPUKernel::Run() { FreeTmpBuffer(); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8.h index 32d771fd..8f4a527a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8.h @@ -23,7 +23,7 @@ #include "src/common/utils.h" #include "nnacl_c/int8/conv_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ConvolutionInt8CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -82,6 +82,6 @@ class ConvolutionInt8CPUKernel : public ConvolutionBaseCPUKernel { int8_t *tmp_out_ = nullptr; MATMUL_OPT_R_FUNC matmul_func_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_CONVOLUTION_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8_creator.cc b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8_creator.cc index 313e37b7..14e36e14 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8_creator.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8_creator.cc @@ -33,7 +33,7 @@ using mindspore::lite::RET_ERROR; using 
mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Conv2DFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kWinogradConvHW = 3; } // namespace @@ -140,4 +140,4 @@ kernel::LiteKernel *CpuConvInt8KernelCreator(const std::vector & } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Conv2DFusion, CpuConvInt8KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8_creator.h b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8_creator.h index f3f7916b..f03b2a51 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8_creator.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/convolution_int8_creator.h @@ -21,7 +21,7 @@ #include "nnacl_c/op_base.h" #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { LiteKernel *CpuConvInt8KernelSelect(const std::vector &inputs, const std::vector &outputs, OpParameter *op_parameter, const lite::InnerContext *ctx); diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/crop_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/crop_int8.cc index 8cc7ef1b..d58dfea7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/crop_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/crop_int8.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Crop; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int CropInt8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C1NUM); CHECK_LESS_RETURN(out_tensors_.size(), C1NUM); @@ -98,4 +98,4 @@ void CropInt8CPUKernel::DoExecute(int task_id) { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Crop, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/crop_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/crop_int8.h index a5ac9dc2..6835c8fc 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/crop_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/crop_int8.h @@ -23,7 +23,7 @@ #include "nnacl_c/int8/crop_int8.h" #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CropInt8CPUKernel : public LiteKernel { public: CropInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -45,6 +45,6 @@ class CropInt8CPUKernel : public LiteKernel { }; int CropInt8Run(void *cdata, int task_id, float lhs_scale, float rhs_scale); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_CROP_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_depthwise_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_depthwise_int8.cc index eea6e79e..4e63bfdd 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_depthwise_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_depthwise_int8.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { DeconvolutionDepthwiseInt8CPUKernel::~DeconvolutionDepthwiseInt8CPUKernel() { if (sliding_ != nullptr) { delete sliding_; @@ -265,4 +265,4 @@ int DeconvolutionDepthwiseInt8CPUKernel::Run() { output_buffer_ = nullptr; return ret; } -} // namespace mindspore::kernel +} // namespace 
mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_depthwise_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_depthwise_int8.h index 3f5492be..99d8e89c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_depthwise_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_depthwise_int8.h @@ -22,7 +22,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl_c/fp32/conv_depthwise_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DeconvolutionDepthwiseInt8CPUKernel : public ConvolutionBaseCPUKernel { public: DeconvolutionDepthwiseInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -47,6 +47,6 @@ class DeconvolutionDepthwiseInt8CPUKernel : public ConvolutionBaseCPUKernel { int32_t *output_buffer_ = nullptr; bool need_align_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_DECONVOLUTION_DEPTHWISE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_int8.cc index db986871..9a762bca 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_int8.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Conv2dTransposeFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { DeConvInt8CPUKernel::~DeConvInt8CPUKernel() { FreeTmpBuffer(); ConvolutionBaseCPUKernel::FreeQuantParam(); @@ -358,4 +358,4 @@ kernel::LiteKernel *CpuDeConvInt8KernelCreator(const std::vector } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Conv2dTransposeFusion, CpuDeConvInt8KernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_int8.h index 55f2bf5d..624fda59 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/deconvolution_int8.h @@ -28,7 +28,7 @@ #include "src/litert/kernel/cpu/base/layout_transform.h" #include "src/litert/kernel/cpu/base/convolution_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DeConvInt8CPUKernel : public ConvolutionBaseCPUKernel { public: DeConvInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -65,5 +65,5 @@ class DeConvInt8CPUKernel : public ConvolutionBaseCPUKernel { MatMulParameter *matmul_param_ = nullptr; bool support_optimize_ = true; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_DECONVOLUTION_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/depth_to_space_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/depth_to_space_int8.cc index ff893b42..c67fb2f5 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/depth_to_space_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/depth_to_space_int8.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::RET_PARAM_INVALID; using mindspore::schema::PrimitiveType_DepthToSpace; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { DepthToSpaceInt8CPUKernel::~DepthToSpaceInt8CPUKernel() { if (in_quant_arg_ != nullptr) { 
free(in_quant_arg_); @@ -114,4 +114,4 @@ int DepthToSpaceInt8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_DepthToSpace, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/depth_to_space_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/depth_to_space_int8.h index cf019000..b21e7985 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/depth_to_space_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/depth_to_space_int8.h @@ -24,7 +24,7 @@ #include "nnacl_c/int8/quantize.h" #include "nnacl_c/kernel/depth_to_space.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DepthToSpaceInt8CPUKernel : public LiteKernel { public: DepthToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -41,6 +41,6 @@ class DepthToSpaceInt8CPUKernel : public LiteKernel { QuantArg *out_quant_arg_ = nullptr; DepthToSpaceArgs args_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_DEPTH_TO_SPACE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/detection_post_process_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/detection_post_process_int8.cc index a4c43b57..80e62294 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/detection_post_process_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/detection_post_process_int8.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_DetectionPostProcess; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int DetectionPostProcessInt8CPUKernel::DequantizeInt8ToFp32(const int task_id) { int num_unit_thread = MSMIN(thread_n_stride_, quant_size_ - task_id * thread_n_stride_); int thread_offset = task_id * thread_n_stride_; @@ -135,4 +135,4 @@ void DetectionPostProcessInt8CPUKernel::FreeAllocatedBuffer() { REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_DetectionPostProcess, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/detection_post_process_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/detection_post_process_int8.h index 569d1591..c3ddf51b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/detection_post_process_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/detection_post_process_int8.h @@ -24,7 +24,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DetectionPostProcessInt8CPUKernel : public DetectionPostProcessBaseCPUKernel { public: DetectionPostProcessInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -45,5 +45,5 @@ class DetectionPostProcessInt8CPUKernel : public DetectionPostProcessBaseCPUKern int quant_size_ = 0; int thread_n_stride_ = 0; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_DETECTION_POST_PROCESS_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/div_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/div_int8.cc index e2c8d868..37544186 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/div_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/div_int8.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_DivFusion; -namespace 
mindspore::kernel { +namespace mindspore::lite::kernel { DivInt8CPUKernel::~DivInt8CPUKernel() { if (quant_args_ != nullptr) { free(quant_args_); @@ -216,4 +216,4 @@ int DivInt8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_DivFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/div_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/div_int8.h index ddebb074..bf4a730a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/div_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/div_int8.h @@ -20,7 +20,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/int8/div_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DivInt8CPUKernel : public LiteKernel { public: explicit DivInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -41,6 +41,6 @@ class DivInt8CPUKernel : public LiteKernel { bool broadcast_ = false; bool div_scalar_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_DIV_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_gather_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_gather_int8.cc index e86c84ec..81a7e45a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_gather_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_gather_int8.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Gather; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { DynamicGatherInt8CPUKernel::~DynamicGatherInt8CPUKernel() { if (quant_param_ != nullptr) { if (quant_param_->zp_in_ != nullptr) { @@ -247,4 +247,4 @@ int DynamicGatherInt8CPUKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_gather_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_gather_int8.h index 8fe495fe..468c4a7c 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_gather_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_gather_int8.h @@ -22,7 +22,7 @@ #include "nnacl_c/int8/quantize.h" #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DynamicGatherInt8CPUKernel : public LiteKernel { public: DynamicGatherInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -49,6 +49,6 @@ class DynamicGatherInt8CPUKernel : public LiteKernel { bool enable_fp16_ = false; DynamicGatherQuantArg *quant_param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_DYNAMIC_GATHER_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_quant.cc b/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_quant.cc index daa79ff2..2724e971 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_quant.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_quant.cc @@ -32,7 +32,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::lite::RET_PARAM_INVALID; using mindspore::schema::PrimitiveType_DynamicQuant; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kBucketNums = 8; constexpr int k8Bit = 8; @@ -397,4 +397,4 @@ kernel::LiteKernel 
*DynamicQuantCPUCreator(const std::vector &in } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_DynamicQuant, DynamicQuantCPUCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_quant.h b/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_quant.h index 137e3d0f..89f25ace 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_quant.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/dynamic_quant.h @@ -23,7 +23,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/dynamic_quant_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DynamicQuantCPUKernel : public LiteKernel { public: DynamicQuantCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -57,6 +57,6 @@ class DynamicQuantCPUKernel : public LiteKernel { int unit_segment_num_{0}; bool need_transpose_{false}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_BASE_DYNAMIC_QUANT_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/fullconnection_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/fullconnection_int8.cc index 9a949832..aa5f29ef 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/fullconnection_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/fullconnection_int8.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_FullConnection; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int FullconnectionInt8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C2NUM); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -67,4 +67,4 @@ int FullconnectionInt8CPUKernel::ReSize() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_FullConnection, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/fullconnection_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/fullconnection_int8.h index d3c54e40..6ddc6b3a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/fullconnection_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/fullconnection_int8.h @@ -21,7 +21,7 @@ #include "include/errorcode.h" #include "src/litert/kernel/cpu/int8/matmul_base_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class FullconnectionInt8CPUKernel : public MatmulBaseInt8CPUKernel { public: FullconnectionInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -31,6 +31,6 @@ class FullconnectionInt8CPUKernel : public MatmulBaseInt8CPUKernel { int Prepare() override; int ReSize() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_FULLCONNECTION_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/gatherNd_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/gatherNd_int8.cc index 40827d17..1091ba49 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/gatherNd_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/gatherNd_int8.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_GatherNd; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { GatherNdInt8CPUKernel::~GatherNdInt8CPUKernel() { if (in_offset_ != nullptr) { free(in_offset_); @@ -182,4 +182,4 @@ int GatherNdInt8CPUKernel::Run() { } REG_KERNEL(kCPU, 
kNumberTypeInt8, PrimitiveType_GatherNd, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/gatherNd_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/gatherNd_int8.h index 0d16b7dd..f2d6e7fe 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/gatherNd_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/gatherNd_int8.h @@ -21,7 +21,7 @@ #include "nnacl_c/int8/quantize.h" #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class GatherNdInt8CPUKernel : public LiteKernel { public: GatherNdInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -46,6 +46,6 @@ class GatherNdInt8CPUKernel : public LiteKernel { int8_t *out_ptr_ = nullptr; GatherQuantArg param_ = {}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_GATHERND_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/gather_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/gather_int8.cc index 3096adc9..5d122518 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/gather_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/gather_int8.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Gather; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int GatherInt8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C2NUM); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -184,4 +184,4 @@ kernel::LiteKernel *GatherInt8CPUKernelCreator(const std::vector } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Gather, GatherInt8CPUKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/gather_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/gather_int8.h index 8f72bcd9..f08acc54 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/gather_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/gather_int8.h @@ -22,7 +22,7 @@ #include "nnacl_c/int8/quantize.h" #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class GatherInt8CPUKernel : public LiteKernel { public: GatherInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -40,6 +40,6 @@ class GatherInt8CPUKernel : public LiteKernel { int axis_{0}; GatherQuantArg param_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_GATHER_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/group_convolution_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/group_convolution_int8.cc index 169439a8..aa2c9b2f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/group_convolution_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/group_convolution_int8.cc @@ -20,7 +20,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int GroupConvolutionInt8CPUKernel::Separate(const int &task_id) const { auto plane_step = UP_DIV(in_plane_, in_thread_num_); MS_CHECK_INT_MUL_NOT_OVERFLOW(plane_step, task_id, RET_ERROR); @@ -127,4 +127,4 @@ int GroupConvolutionInt8CPUKernel::Prepare() { } return GroupConvolutionBaseCPUKernel::Prepare(); } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git 
a/mindspore-lite/src/litert/kernel/cpu/int8/group_convolution_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/group_convolution_int8.h index c3d3bd9f..b2a1e171 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/group_convolution_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/group_convolution_int8.h @@ -23,7 +23,7 @@ #include "nnacl_c/op_base.h" #include "src/litert/kernel/cpu/base/group_convolution_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class GroupConvolutionInt8CPUKernel : public GroupConvolutionBaseCPUKernel { public: GroupConvolutionInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -46,6 +46,6 @@ class GroupConvolutionInt8CPUKernel : public GroupConvolutionBaseCPUKernel { int8_t *sub_out_src_ = nullptr; int8_t *sub_out_dst_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_GROUP_CONVOLUTION_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/hswish_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/hswish_int8.cc index 6a3c9d1e..d37a68be 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/hswish_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/hswish_int8.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::ActivationType_HSWISH; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int HswishInt8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C1NUM); CHECK_LESS_RETURN(out_tensors_.size(), C1NUM); @@ -111,4 +111,4 @@ int HswishInt8CPUKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/hswish_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/hswish_int8.h index e5448aec..6d1c0c90 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/hswish_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/hswish_int8.h @@ -22,7 +22,7 @@ #include "nnacl_c/int8/hswish_int8.h" #include "nnacl_c/int8/quantize.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class HswishInt8CPUKernel : public LiteKernel { public: HswishInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -40,6 +40,6 @@ class HswishInt8CPUKernel : public LiteKernel { HswishQuantArg quant_arg_ = {}; void MultiplierInt32ToInt16(int32_t input, int16_t *output) const; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_HSWISH_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/l2_norm_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/l2_norm_int8.cc index bf892cf3..13667803 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/l2_norm_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/l2_norm_int8.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_L2NormalizeFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { L2NormInt8CPUKernel::~L2NormInt8CPUKernel() { if (quant_param_ != nullptr) { free(quant_param_); @@ -107,4 +107,4 @@ int L2NormInt8CPUKernel::DoExecute(int task_id) { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_L2NormalizeFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/l2_norm_int8.h 
b/mindspore-lite/src/litert/kernel/cpu/int8/l2_norm_int8.h index 4d50c56e..013c91b7 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/l2_norm_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/l2_norm_int8.h @@ -20,7 +20,7 @@ #include "src/litert/kernel/cpu/fp32/l2_norm_fp32.h" #include "nnacl_c/int8/l2_norm_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class L2NormInt8CPUKernel : public L2NormCPUKernel { public: explicit L2NormInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -35,6 +35,6 @@ class L2NormInt8CPUKernel : public L2NormCPUKernel { private: L2NormQuantArg *quant_param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_L2_NORM_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/layer_norm_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/layer_norm_int8.cc index b1c43283..e695a253 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/layer_norm_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/layer_norm_int8.cc @@ -21,7 +21,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_LayerNormFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int min_layernorm_input = 3; constexpr int min_layernorm_output = 1; @@ -202,4 +202,4 @@ int LayerNormInt8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_LayerNormFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/layer_norm_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/layer_norm_int8.h index cebae25a..ebd7f334 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/layer_norm_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/layer_norm_int8.h @@ -23,7 +23,7 @@ #include "include/errorcode.h" #include "nnacl_c/kernel/layer_norm.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class LayerNormInt8CPUKernel : public LiteKernel { public: LayerNormInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -52,6 +52,6 @@ class LayerNormInt8CPUKernel : public LiteKernel { float *gamma_ptr_ = nullptr; float *beta_ptr_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_LAYER_NORM_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/leaky_relu_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/leaky_relu_int8.cc index b528b8dc..1ee5c7fa 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/leaky_relu_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/leaky_relu_int8.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_LeakyRelu; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { int LeakyReluInt8Run(void *cdata, int task_id, float, float) { if (cdata == nullptr) { @@ -108,4 +108,4 @@ int LeakyReluInt8CPUKernel::DoExecute(int task_id) { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_LeakyRelu, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/leaky_relu_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/leaky_relu_int8.h index 1cb72ed7..241b290b 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/int8/leaky_relu_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/leaky_relu_int8.h @@ -24,7 +24,7 @@ #include "nnacl_c/int8/leaky_relu_int8.h" #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class LeakyReluInt8CPUKernel : public LiteKernel { public: LeakyReluInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -40,6 +40,6 @@ class LeakyReluInt8CPUKernel : public LiteKernel { private: LeakyReluQuantArg quant_prelu_parm_ = {}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_LEAKY_RELU_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_base_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_base_int8.cc index 9a020da1..20af6cc2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_base_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_base_int8.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int MatmulBaseInt8Run(void *cdata, int task_id, float, float) { CHECK_NULL_RETURN(cdata); auto op = reinterpret_cast(cdata); @@ -569,4 +569,4 @@ int MatmulBaseInt8CPUKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_base_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_base_int8.h index 06a5bf3d..fb121148 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_base_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_base_int8.h @@ -26,7 +26,7 @@ #include "nnacl_c/int8/common_func_int8.h" #include "nnacl_c/int8/matmul_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MatmulBaseInt8CPUKernel : public LiteKernel { typedef void (*PackFunc)(const int8_t *src, int8_t *dst, int row, int col); @@ -95,6 +95,6 @@ class MatmulBaseInt8CPUKernel : public LiteKernel { std::vector a_offset_; std::vector b_offset_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_MATMUL_BASE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_base_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_base_int8.cc index ce95c450..a6b17eeb 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_base_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_base_int8.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int kHasBiasSize = 3; constexpr int kMinInputSize = 2; @@ -441,4 +441,4 @@ int MatmulDynamicBaseInt8CPUKernel::PreparePackedWeight(const lite::Tensor *tens weight_sums_tensor_ = tensor; return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_base_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_base_int8.h index 42e0da55..3c1b16f5 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_base_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_base_int8.h @@ -27,7 +27,7 @@ #include 
"nnacl_c/int8/common_func_int8.h" #include "src/common/common.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MatmulDynamicBaseInt8CPUKernel : public LiteKernel { public: MatmulDynamicBaseInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -97,6 +97,6 @@ class MatmulDynamicBaseInt8CPUKernel : public LiteKernel { bool weight_is_packed_ = false; const lite::Tensor *weight_sums_tensor_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_MATMUL_DYNAMIC_BASE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_int8.cc index 1c8ba263..c02d9e0e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_int8.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { int MatmulDynamicInt8Run(void *cdata, int task_id, float, float) { CHECK_NULL_RETURN(cdata); @@ -145,4 +145,4 @@ int MatmulDynamicInt8CPUKernel::Run() { FreeMatrixABuffer(); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_int8.h index e511cc52..e1f6369a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_int8.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/int8/matmul_dynamic_base_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MatmulDynamicInt8CPUKernel : public MatmulDynamicBaseInt8CPUKernel { public: MatmulDynamicInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -37,6 +37,6 @@ class MatmulDynamicInt8CPUKernel : public MatmulDynamicBaseInt8CPUKernel { PackFunc a_pack_func_{nullptr}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_MATMUL_DYNAMIC_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_sdot_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_sdot_int8.cc index b2a6ef1b..bf92673d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_sdot_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_sdot_int8.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { int Arm64SdotPreRun(void *cdata, int task_id, float, float) { CHECK_NULL_RETURN(cdata); @@ -278,4 +278,4 @@ int MatMulDynamicSdotInt8Kernel::Run() { FreeMatrixABuffer(); return ret; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_sdot_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_sdot_int8.h index 2309dfd9..ecf0e4a5 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_sdot_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_dynamic_sdot_int8.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/cpu/int8/matmul_dynamic_base_int8.h" -namespace 
mindspore::kernel { +namespace mindspore::lite::kernel { class MatMulDynamicSdotInt8Kernel : public MatmulDynamicBaseInt8CPUKernel { public: MatMulDynamicSdotInt8Kernel(OpParameter *parameter, const std::vector &inputs, @@ -53,6 +53,6 @@ class MatMulDynamicSdotInt8Kernel : public MatmulDynamicBaseInt8CPUKernel { DynamicMatmulComputer dynamic_matmul_compute_fp16{nullptr}; #endif }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_MATMUL_DYNAMIC_SDOT_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_int8.cc index fec4a805..e7ff2319 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_int8.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_MatMulFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int min_matmul_input = 2; constexpr int min_matmul_output = 1; @@ -113,4 +113,4 @@ kernel::LiteKernel *MatmulInt8CPUKernelCreator(const std::vector } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_MatMulFusion, MatmulInt8CPUKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_int8.h index 55bc989e..c6ffb0e5 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/matmul_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/matmul_int8.h @@ -23,7 +23,7 @@ #include "src/litert/lite_kernel.h" #include "src/litert/kernel/cpu/int8/matmul_base_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MatmulInt8CPUKernel : public MatmulBaseInt8CPUKernel { public: MatmulInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -35,6 +35,6 @@ class MatmulInt8CPUKernel : public MatmulBaseInt8CPUKernel { int Prepare() override; int ReSize() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_MATMUL_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/mul_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/mul_int8.cc index ccd0bfd7..0d2e0088 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/mul_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/mul_int8.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_MulFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { MulInt8CPUKernel::~MulInt8CPUKernel() { if (quant_args_ != nullptr) { free(quant_args_); @@ -240,4 +240,4 @@ void MulInt8CPUKernel::DoExecute(int task_id) { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_MulFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/mul_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/mul_int8.h index 0562a727..cf5be390 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/mul_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/mul_int8.h @@ -24,7 +24,7 @@ #include "nnacl_c/int8/mul_int8.h" #include "nnacl_c/int8/arithmetic_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MulInt8CPUKernel : public LiteKernel { public: explicit MulInt8CPUKernel(OpParameter 
*parameter, const std::vector &inputs, @@ -58,6 +58,6 @@ class MulInt8CPUKernel : public LiteKernel { int MulInt8Run(void *cdata, int task_id, float lhs_scale, float rhs_scale); int FastHWBroadcastMulInt8Run(void *cdata, int task_id, float lhs_scale, float rhs_scale); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_MUL_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/pad_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/pad_int8.cc index 27c1f8f5..92e39102 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/pad_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/pad_int8.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::KernelRegistrar; using mindspore::schema::PrimitiveType_PadFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr size_t kMirrorPadInputSize = 2; } @@ -324,4 +324,4 @@ int PadInt8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_PadFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/pad_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/pad_int8.h index 418d3ded..d1d50404 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/pad_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/pad_int8.h @@ -25,7 +25,7 @@ #include "nnacl_c/int8/pad_int8.h" #include "nnacl_c/int8/quantize.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class PadInt8CPUKernel : public LiteKernel { public: explicit PadInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -67,5 +67,5 @@ class PadInt8CPUKernel : public LiteKernel { int mirror_offset_ = 0; PadQuantArg pad_quant_arg_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_PAD_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/pooling_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/pooling_int8.cc index 560a8561..26aebaf8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/pooling_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/pooling_int8.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_MEMORY_FAILED; using mindspore::schema::PrimitiveType_AvgPoolFusion; using mindspore::schema::PrimitiveType_MaxPoolFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int PoolingInt8CPUKernel::SetQuantParam() { // per tensor init pooling_quant_arg_ = reinterpret_cast(malloc(TWO_TENSOR * sizeof(QuantArg *))); @@ -201,4 +201,4 @@ int PoolingInt8CPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_AvgPoolFusion, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_MaxPoolFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/pooling_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/pooling_int8.h index 4c2f3dfa..394bb737 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/pooling_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/pooling_int8.h @@ -23,7 +23,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class PoolingInt8CPUKernel : public LiteKernel { public: PoolingInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -47,6 +47,6 @@ class PoolingInt8CPUKernel : public LiteKernel { PoolingParameter 
*pooling_param_ = nullptr; QuantArg **pooling_quant_arg_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_POOLING_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/power_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/power_int8.cc index d7cffb45..81003856 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/power_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/power_int8.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_PowFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int PowerInt8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -117,4 +117,4 @@ int PowerInt8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_PowFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/power_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/power_int8.h index e5e4532a..e0eb2120 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/power_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/power_int8.h @@ -22,7 +22,7 @@ #include "nnacl_c/int8/quantize.h" #include "nnacl_c/pow_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class PowerInt8CPUKernel : public LiteKernel { public: PowerInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -45,6 +45,6 @@ class PowerInt8CPUKernel : public LiteKernel { PowQuantArg quant_arg_; bool broadcast_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_POWER_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/reduce_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/reduce_int8.cc index 0ba3d0f8..002df9c3 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/reduce_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/reduce_int8.cc @@ -36,7 +36,7 @@ using mindspore::schema::ReduceMode_ReduceSumSquare; using mindspore::kernel::KERNEL_ARCH; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { ReduceInt8CPUKernel::~ReduceInt8CPUKernel() { FreeMultipliers(); } void ReduceInt8CPUKernel::OneAxis() { @@ -599,4 +599,4 @@ int ReduceInt8CPUKernel::CallReduceUnit(int task_id) { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ReduceFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/reduce_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/reduce_int8.h index a1dc3190..752c0d36 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/reduce_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/reduce_int8.h @@ -27,7 +27,7 @@ using mindspore::schema::ReduceMode; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { enum Four_DIMENSION_REDUCE_TEMPLATE { N, H, W, C, NH, NW, NC, HW, HC, WC, NHW, NHC, NWC, HWC, NHWC }; class ReduceInt8CPUKernel : public ReduceBaseCPUKernel { typedef int (*Reducer)(const int outer_size, const int inner_size, const int axis_size, const int32_t *src_data, @@ -81,6 +81,6 @@ class ReduceInt8CPUKernel : public ReduceBaseCPUKernel { std::vector prod_multipliers_; std::vector sum_square_multipliers_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // 
MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_REDUCE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/relux_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/relux_int8.cc index 13975236..81e7acae 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/relux_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/relux_int8.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::ActivationType_RELU; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ReluXInt8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C1NUM); CHECK_LESS_RETURN(out_tensors_.size(), C1NUM); @@ -90,4 +90,4 @@ int ReluXInt8CPUKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/relux_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/relux_int8.h index bb9abb53..3e4a45a2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/relux_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/relux_int8.h @@ -22,7 +22,7 @@ #include "nnacl_c/fp32/activation_fp32.h" #include "nnacl_c/int8/relux_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr size_t kRelu6Min = 0; constexpr size_t kRelu6Max = 6; @@ -79,6 +79,6 @@ class Relu6Int8CPUKernel : public ReluXInt8CPUKernel { return ret; }; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_RELUX_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/reshape_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/reshape_int8.cc index 1df86f3c..14e2a49a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/reshape_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/reshape_int8.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::KernelRegistrar; using mindspore::schema::PrimitiveType_Reshape; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ReshapeInt8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -97,4 +97,4 @@ int ReshapeInt8CPUKernel::DoExecute(int task_id) { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Reshape, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/reshape_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/reshape_int8.h index 91f6e9ba..fbe4c889 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/reshape_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/reshape_int8.h @@ -23,7 +23,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ReshapeInt8CPUKernel : public LiteKernel { public: ReshapeInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -47,6 +47,6 @@ class ReshapeInt8CPUKernel : public LiteKernel { }; int ReshapeInt8Run(void *cdata, int task_id, float, float); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_RESHAPE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/resize_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/resize_int8.cc index 582cdab5..9858e94d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/resize_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/resize_int8.cc @@ -32,7 +32,7 @@ using mindspore::lite::RET_OK; using 
mindspore::lite::KernelRegistrar; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr unsigned int OFFSET_BASE = 10; } // namespace @@ -411,4 +411,4 @@ int ResizeInt8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Resize, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/resize_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/resize_int8.h index 3e38c803..191e040f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/resize_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/resize_int8.h @@ -24,7 +24,7 @@ using mindspore::schema::PrimitiveType_Resize; using mindspore::schema::ResizeMethod; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ResizeInt8CPUKernel : public ResizeBaseCPUKernel { public: ResizeInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -55,6 +55,6 @@ class ResizeInt8CPUKernel : public ResizeBaseCPUKernel { ResizeQuantArg resize_quant_arg_ = {}; ResizeFloatScaleQuantArg resize_float_quant_arg_ = {}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_RESIZE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/scale_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/scale_int8.cc index fd5ff47e..f7ea3603 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/scale_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/scale_int8.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_ScaleFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr size_t kScaleInputsSize = 2; constexpr size_t kScaleBiasInputsSize = 3; @@ -350,4 +350,4 @@ int ScaleInt8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ScaleFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/scale_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/scale_int8.h index 9c569e9e..64c9ac43 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/scale_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/scale_int8.h @@ -25,7 +25,7 @@ #include "nnacl_c/int8/arithmetic_int8.h" #include "nnacl_c/int8/scale_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ScaleInt8CPUKernel : public LiteKernel { public: ScaleInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -62,6 +62,6 @@ class ScaleInt8CPUKernel : public LiteKernel { int InitQuantArgs(); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_SCALE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/sigmoid_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/sigmoid_int8.cc index ca4651ce..89b64f93 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/sigmoid_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/sigmoid_int8.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::ActivationType_SIGMOID; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void CalculateTableList(int8_t *table, const float input_scale, const int32_t input_zp, const float output_scale, const int32_t output_zp) { int32_t min_value = std::numeric_limits::min(); @@ 
-104,4 +104,4 @@ int SigmoidInt8CPUKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/sigmoid_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/sigmoid_int8.h index 68fcd2cc..7b9cd1ff 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/sigmoid_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/sigmoid_int8.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/int8/sigmoid_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SigmoidInt8CPUKernel : public LiteKernel { public: SigmoidInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -37,6 +37,6 @@ class SigmoidInt8CPUKernel : public LiteKernel { private: int8_t table_list_[256]{0}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_SIGMOID_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/slice_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/slice_int8.cc index ab71d4b5..f52eab13 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/slice_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/slice_int8.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SliceFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SliceInt8CPUKernel::ReSize() { InitSliceStruct(&slice_struct_, in_tensors_.front()->ConvertToTensorC(), in_tensors_.at(SECOND_INPUT)->ConvertToTensorC(), in_tensors_.at(THIRD_INPUT)->ConvertToTensorC()); @@ -108,4 +108,4 @@ int SliceInt8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_SliceFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/slice_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/slice_int8.h index 3ebc7536..18608f0e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/slice_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/slice_int8.h @@ -23,7 +23,7 @@ #include "nnacl_c/kernel/slice.h" #include "nnacl_c/slice_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SliceInt8CPUKernel : public LiteKernel { public: SliceInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -40,6 +40,6 @@ class SliceInt8CPUKernel : public LiteKernel { SliceStruct slice_struct_; SliceQuantArg quant_arg_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_SLICE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/softmax_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/softmax_int8.cc index 34ea1f6f..64d28876 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/softmax_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/softmax_int8.cc @@ -28,7 +28,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_NULL_PTR; using mindspore::schema::PrimitiveType_Softmax; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { SoftmaxInt8CPUKernel::~SoftmaxInt8CPUKernel() { if (quant_param_ != nullptr) { free(quant_param_); @@ -178,4 +178,4 @@ int SoftmaxInt8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Softmax, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git 
a/mindspore-lite/src/litert/kernel/cpu/int8/softmax_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/softmax_int8.h index f6c0d0ac..c19964b6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/softmax_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/softmax_int8.h @@ -22,7 +22,7 @@ #include "nnacl_c/softmax_parameter.h" #include "nnacl_c/int8/quantize.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SoftmaxInt8CPUKernel : public LiteKernel { public: SoftmaxInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -46,6 +46,6 @@ class SoftmaxInt8CPUKernel : public LiteKernel { SoftmaxParameter *softmax_param_; SoftmaxQuantArg *quant_param_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_SOFTMAX_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/space_to_batch_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/space_to_batch_int8.cc index 0cb03a44..03c3bb69 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/space_to_batch_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/space_to_batch_int8.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SpaceToBatch; using mindspore::schema::PrimitiveType_SpaceToBatchND; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SpaceToBatchInt8CPUKernel::Run() { auto input_tensor = in_tensors_.at(0); auto output_tensor = out_tensors_.at(0); @@ -50,4 +50,4 @@ int SpaceToBatchInt8CPUKernel::Run() { REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_SpaceToBatch, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_SpaceToBatchND, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/space_to_batch_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/space_to_batch_int8.h index 6bba6e75..e1a901d2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/space_to_batch_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/space_to_batch_int8.h @@ -19,7 +19,7 @@ #include #include "src/litert/kernel/cpu/fp32/space_to_batch_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SpaceToBatchInt8CPUKernel : public SpaceToBatchCPUKernel { public: SpaceToBatchInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -30,6 +30,6 @@ class SpaceToBatchInt8CPUKernel : public SpaceToBatchCPUKernel { int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_SPACE_TO_BATCH_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/split_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/split_int8.cc index 87f8ae0a..b0991031 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/split_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/split_int8.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::KernelRegistrar; using mindspore::schema::PrimitiveType_Split; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SplitInt8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C1NUM); CHECK_LESS_RETURN(out_tensors_.size(), C1NUM); @@ -123,4 +123,4 @@ int SplitInt8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Split, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git 
a/mindspore-lite/src/litert/kernel/cpu/int8/split_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/split_int8.h index 637fdb23..9211b27d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/split_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/split_int8.h @@ -23,7 +23,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SplitInt8CPUKernel : public SplitBaseCPUKernel { public: SplitInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -40,6 +40,6 @@ class SplitInt8CPUKernel : public SplitBaseCPUKernel { int8_t *input_ptr_{nullptr}; std::vector output_ptr_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_SPLIT_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/squeeze_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/squeeze_int8.cc index 1a1b0b9c..414caf7d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/squeeze_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/squeeze_int8.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Squeeze; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { SqueezeInt8CPUKernel::~SqueezeInt8CPUKernel() { if (quant_squeeze_param_ != nullptr) { if (quant_squeeze_param_->in_quant_args_ != nullptr) { @@ -127,4 +127,4 @@ void SqueezeInt8CPUKernel::DoExecute(int task_id) { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Squeeze, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/squeeze_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/squeeze_int8.h index 28380f78..8da14126 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/squeeze_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/squeeze_int8.h @@ -24,7 +24,7 @@ #include "nnacl_c/squeeze_parameter.h" using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SqueezeInt8CPUKernel : public LiteKernel { public: SqueezeInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -42,6 +42,6 @@ class SqueezeInt8CPUKernel : public LiteKernel { }; int SqueezeInt8Run(void *cdata, int task_id, float, float); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_SQUEEZE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/sub_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/sub_int8.cc index 4be0c59b..00b27f22 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/sub_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/sub_int8.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SubFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { SubInt8CPUKernel::~SubInt8CPUKernel() { if (quant_param_ != nullptr) { free(quant_param_); @@ -197,4 +197,4 @@ int SubInt8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_SubFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/sub_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/sub_int8.h index 77ecd6c8..6c9daa9d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/sub_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/sub_int8.h @@ -24,7 
+24,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/int8/sub_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SubInt8CPUKernel : public LiteKernel { public: explicit SubInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -43,6 +43,6 @@ class SubInt8CPUKernel : public LiteKernel { int8_t *tile1_data_ = nullptr; bool broadcast_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_SUB_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/tanh_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/tanh_int8.cc index c0b58b2f..fbfd113a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/tanh_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/tanh_int8.cc @@ -19,7 +19,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int TanhInt8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C1NUM); CHECK_LESS_RETURN(out_tensors_.size(), C1NUM); @@ -91,4 +91,4 @@ int TanhInt8CPUKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/tanh_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/tanh_int8.h index f43454a0..7b56f809 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/tanh_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/tanh_int8.h @@ -25,7 +25,7 @@ #include "nnacl_c/int8/quantize.h" #include "include/errorcode.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class TanhInt8CPUKernel : public LiteKernel { public: TanhInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -48,6 +48,6 @@ class TanhInt8CPUKernel : public LiteKernel { int thread_stride_{0}; TanhQuantParameter tanh_quant_ = {}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_TANH_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/topk_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/topk_int8.cc index 3179cd77..3cbd2669 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/topk_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/topk_int8.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_TopKFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int TopKInt8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C1NUM); CHECK_LESS_RETURN(out_tensors_.size(), C1NUM); @@ -79,4 +79,4 @@ int TopKInt8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_TopKFusion, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/topk_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/topk_int8.h index 2250ef63..36bcb1eb 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/topk_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/topk_int8.h @@ -20,7 +20,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/int8/topk_int8.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class TopKInt8CPUKernel : public LiteKernel { public: explicit TopKInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -35,6 +35,6 @@ class TopKInt8CPUKernel : public LiteKernel { int ReSize() override; int Run() override; }; -} // 
namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_TOPK_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/transpose_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/transpose_int8.cc index 6a92baab..6622fc58 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/transpose_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/transpose_int8.cc @@ -23,7 +23,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Transpose; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int TransposeInt8CPUKernel::ReSize() { auto ret = TransposeBaseCPUKernel::ReSize(); if (ret != RET_OK) { @@ -54,4 +54,4 @@ int TransposeInt8CPUKernel::DoTransposeMultiThread(int task_id) { REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Transpose, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_Transpose, LiteKernelCreator) REG_KERNEL(kCPU, kNumberTypeUInt8, PrimitiveType_Transpose, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/transpose_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/transpose_int8.h index 70389623..3fccf71d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/transpose_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/transpose_int8.h @@ -19,7 +19,7 @@ #include #include "src/litert/kernel/cpu/base/transpose_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class TransposeInt8CPUKernel : public TransposeBaseCPUKernel { public: TransposeInt8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -33,6 +33,6 @@ class TransposeInt8CPUKernel : public TransposeBaseCPUKernel { private: int DoTransposeSingleThread() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_TRANSPOSE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/unsqueeze_int8.cc b/mindspore-lite/src/litert/kernel/cpu/int8/unsqueeze_int8.cc index e490ab0b..947113d6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/unsqueeze_int8.cc +++ b/mindspore-lite/src/litert/kernel/cpu/int8/unsqueeze_int8.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Unsqueeze; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int Unsqueezeint8CPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -108,4 +108,4 @@ int Unsqueezeint8CPUKernel::Run() { } REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Unsqueeze, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/int8/unsqueeze_int8.h b/mindspore-lite/src/litert/kernel/cpu/int8/unsqueeze_int8.h index 52996b42..572f1d12 100644 --- a/mindspore-lite/src/litert/kernel/cpu/int8/unsqueeze_int8.h +++ b/mindspore-lite/src/litert/kernel/cpu/int8/unsqueeze_int8.h @@ -23,7 +23,7 @@ using mindspore::lite::InnerContext; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class Unsqueezeint8CPUKernel : public LiteKernel { public: Unsqueezeint8CPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -48,6 +48,6 @@ class Unsqueezeint8CPUKernel : public LiteKernel { float *out_ptr_{nullptr}; int thread_count_{0}; }; -} // namespace mindspore::kernel +} // 
namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_INT8_UNSQUEEZE_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/cxx_utils.cc b/mindspore-lite/src/litert/kernel/cpu/nnacl/cxx_utils.cc index 71c551b9..d8c8f937 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/cxx_utils.cc +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/cxx_utils.cc @@ -23,7 +23,7 @@ #include "src/common/log_util.h" #include "include/errorcode.h" -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { void *DefaultAllocatorMalloc(void *allocator, size_t sz) { if (allocator == nullptr) { MS_LOG(ERROR) << "in param invalid"; @@ -81,4 +81,4 @@ int DefaultUpdateThreadNumPass(int32_t kernel_type, int64_t per_unit_load_num, i #endif return update_thread; } -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/cxx_utils.h b/mindspore-lite/src/litert/kernel/cpu/nnacl/cxx_utils.h index ce8f3d57..73c1744a 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/cxx_utils.h +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/cxx_utils.h @@ -20,7 +20,7 @@ #include #include -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { void *DefaultAllocatorMalloc(void *allocator, size_t sz); void DefaultAllocatorFree(void *allocator, void *ptr); int DefaultThreadPoolParallelLunch(void *threadPool, void *task, void *param, int taskNr); @@ -28,5 +28,5 @@ void *DefaultGetSharingPackData(void *manager, const void *tensor_data, const si void DefaultFreeSharingPackData(void *manager, void *tensor_data); int DefaultUpdateThreadNumPass(int32_t kernel_type, int64_t per_unit_load_num, int64_t per_unit_store_num, int64_t unit_num, int thread_num); -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl #endif // MINDSPORE_LITE_SRC_LITERT_KERNEL_CPU_NNACL_CXX_UTILS_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_batchnorm.cc b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_batchnorm.cc index e1f7671e..74352ace 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_batchnorm.cc +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_batchnorm.cc @@ -21,7 +21,7 @@ using mindspore::schema::PrimitiveType_BatchNorm; -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { int BatchNormKernel::SetupVirtualBatch(int virtual_batch_multiplier, int momentum) { CHECK_NULL_RETURN(kernel_); BatchNormSetupVirtualBatch(kernel_, virtual_batch_multiplier, momentum); @@ -30,4 +30,4 @@ int BatchNormKernel::SetupVirtualBatch(int virtual_batch_multiplier, int momentu NNACL_KERNEL(PrimitiveType_BatchNorm, kNumberTypeFloat32, NNACLOpt) NNACL_KERNEL(PrimitiveType_BatchNorm, kNumberTypeFloat16, NNACLOpt) -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_batchnorm.h b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_batchnorm.h index b09c79fb..e2b81af3 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_batchnorm.h +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_batchnorm.h @@ -20,7 +20,7 @@ #include #include "nnacl/nnacl_kernel.h" -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { class BatchNormKernel : public NNACLKernel { public: explicit BatchNormKernel(OpParameter *parameter, const std::vector &inputs, @@ -29,5 +29,5 @@ class BatchNormKernel : public NNACLKernel { ~BatchNormKernel() override = default; int SetupVirtualBatch(int virtual_batch_multiplier, int momentum) 
override; }; -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_NNACL_BATCHNORM_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_convolution.cc b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_convolution.cc index 17f7fb97..07806292 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_convolution.cc +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_convolution.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Conv2DFusion; -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { int ConvolutionKernel::Prepare() { if (kernel_ == nullptr) { return RET_ERROR; @@ -64,4 +64,4 @@ NNACLKernel *NNACLConvolutionOpt(OpParameter *parameter, const std::vector #include "nnacl/nnacl_kernel.h" -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { class ConvolutionKernel : public NNACLKernel { public: explicit ConvolutionKernel(OpParameter *parameter, const std::vector &inputs, @@ -30,5 +30,5 @@ class ConvolutionKernel : public NNACLKernel { int Prepare() override; int ReSize() override; }; -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_NNACL_CONVOLUTION_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_fused_batch_norm.cc b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_fused_batch_norm.cc index 2d9f635e..23a92f5e 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_fused_batch_norm.cc +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_fused_batch_norm.cc @@ -22,7 +22,7 @@ using mindspore::schema::PrimitiveType_FusedBatchNorm; -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { int FusedBatchNormKernel::Eval() { auto ret = LiteKernel::Eval(); if (ret != RET_OK) { @@ -39,4 +39,4 @@ int FusedBatchNormKernel::Run() { NNACL_KERNEL(PrimitiveType_FusedBatchNorm, kNumberTypeFloat32, NNACLOpt) NNACL_KERNEL(PrimitiveType_FusedBatchNorm, kNumberTypeFloat16, NNACLOpt) -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_fused_batch_norm.h b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_fused_batch_norm.h index 943a5550..8cf358eb 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_fused_batch_norm.h +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_fused_batch_norm.h @@ -20,7 +20,7 @@ #include #include "nnacl/nnacl_kernel.h" -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { class FusedBatchNormKernel : public NNACLKernel { public: explicit FusedBatchNormKernel(OpParameter *parameter, const std::vector &inputs, @@ -30,5 +30,5 @@ class FusedBatchNormKernel : public NNACLKernel { int Eval() override; int Run() override; }; -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_NNACL_FUSED_BATCHNORM_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_kernel.cc b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_kernel.cc index e467a351..9f023615 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_kernel.cc +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_kernel.cc @@ -22,7 +22,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { NNACLKernel::~NNACLKernel() { if (in_ != nullptr) { free(in_); @@ -179,4 +179,4 @@ int 
NNACLKernel::InitKernel(const TypeId &data_type, const lite::InnerContext *c kernel_->UpdateThread = DefaultUpdateThreadNumPass; return RET_OK; } -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_kernel.h b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_kernel.h index 23203686..b78fb087 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_kernel.h +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_kernel.h @@ -22,7 +22,7 @@ #include "src/executor/kernel_exec.h" #include "src/litert/lite_kernel.h" -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { class NNACLKernel : public kernel::LiteKernel { public: explicit NNACLKernel(OpParameter *parameter, const std::vector &inputs, @@ -55,6 +55,6 @@ class NNACLKernel : public kernel::LiteKernel { size_t in_size_ = 0; size_t out_size_ = 0; }; -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_NNACL_KERNEL_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_manager.cc b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_manager.cc index 292ef37b..829fd88d 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_manager.cc +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_manager.cc @@ -16,7 +16,7 @@ #include "nnacl/nnacl_manager.h" -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { bool NNACLSupportKernel(int op_type, TypeId data_type) { auto creator = KernelRegistry::GetInstance()->Creator({op_type, data_type}); if (creator != nullptr) { @@ -51,4 +51,4 @@ NNACLKernel *NNACLKernelRegistry(OpParameter *parameter, const std::vector #include "nnacl/nnacl_kernel.h" -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { struct KeyDesc { int op_; TypeId dt_; @@ -73,5 +73,5 @@ bool NNACLSupportKernel(int op_type, TypeId data_type); NNACLKernel *NNACLKernelRegistry(OpParameter *parameter, const std::vector &inputs, const std::vector &outputs, const lite::InnerContext *ctx, const kernel::KernelKey &key); -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_NNACL_KERNEL_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_matmul.cc b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_matmul.cc index 913611c5..5524acb6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_matmul.cc +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_matmul.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_FullConnection; using mindspore::schema::PrimitiveType_MatMulFusion; -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { int MatmulKernel::Prepare() { if (kernel_ == nullptr) { return RET_ERROR; @@ -84,4 +84,4 @@ int MatmulKernel::PreparePackedWeight(const lite::Tensor *tensor) { NNACL_KERNEL(PrimitiveType_MatMulFusion, kNumberTypeFloat32, NNACLOpt) NNACL_KERNEL(PrimitiveType_FullConnection, kNumberTypeFloat32, NNACLOpt) -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_matmul.h b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_matmul.h index 7e10fc20..34854c16 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_matmul.h +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_matmul.h @@ -21,7 +21,7 @@ #include "nnacl/nnacl_kernel.h" #include "nnacl_c/matmul_parameter.h" -namespace mindspore::nnacl { +namespace 
mindspore::lite::nnacl { class MatmulKernel : public NNACLKernel { public: explicit MatmulKernel(OpParameter *parameter, const std::vector &inputs, @@ -32,5 +32,5 @@ class MatmulKernel : public NNACLKernel { int Prepare() override; int PreparePackedWeight(const lite::Tensor *tensor) override; }; -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_NNACL_MATMUL_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_non_max_suppression.cc b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_non_max_suppression.cc index 30b1dd43..40be1274 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_non_max_suppression.cc +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_non_max_suppression.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_NonMaxSuppression; -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { int NonMaxSuppressionKernel::PreProcess() { return RET_OK; } @@ -37,4 +37,4 @@ int NonMaxSuppressionKernel::Run() { } NNACL_KERNEL(PrimitiveType_NonMaxSuppression, kNumberTypeFloat32, NNACLOpt) -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_non_max_suppression.h b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_non_max_suppression.h index fa219765..e3924e18 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_non_max_suppression.h +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_non_max_suppression.h @@ -20,7 +20,7 @@ #include #include "nnacl/nnacl_kernel.h" -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { class NonMaxSuppressionKernel : public NNACLKernel { public: explicit NonMaxSuppressionKernel(OpParameter *parameter, const std::vector &inputs, @@ -30,6 +30,6 @@ class NonMaxSuppressionKernel : public NNACLKernel { int Run() override; int PreProcess() override; }; -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_NNACL_NON_MAX_SUPPRESSION_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reduce.cc b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reduce.cc index a1d2f3d3..7efd2cf2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reduce.cc +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reduce.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_ReduceFusion; -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { int ReduceKernel::Run() { ReduceStruct *reduce = reinterpret_cast(kernel_); CHECK_NULL_RETURN(reduce); @@ -40,4 +40,4 @@ int ReduceKernel::Run() { NNACL_KERNEL(PrimitiveType_ReduceFusion, kNumberTypeFloat32, NNACLOpt) NNACL_KERNEL(PrimitiveType_ReduceFusion, kNumberTypeInt32, NNACLOpt) NNACL_KERNEL(PrimitiveType_ReduceFusion, kNumberTypeBool, NNACLOpt) -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reduce.h b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reduce.h index e6be2f5b..ec4033fe 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reduce.h +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reduce.h @@ -20,7 +20,7 @@ #include #include "nnacl/nnacl_kernel.h" -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { class ReduceKernel : public NNACLKernel { public: explicit 
ReduceKernel(OpParameter *parameter, const std::vector &inputs, @@ -29,6 +29,6 @@ class ReduceKernel : public NNACLKernel { ~ReduceKernel() override = default; int Run() override; }; -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_NNACL_REDUCE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reshape.cc b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reshape.cc index fb45565f..f5aa0371 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reshape.cc +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reshape.cc @@ -29,7 +29,7 @@ using mindspore::schema::PrimitiveType_Reshape; using mindspore::schema::PrimitiveType_Squeeze; using mindspore::schema::PrimitiveType_Unsqueeze; -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { int ReshapeKernel::Run() { auto in_tensor = in_tensors().front(); CHECK_NULL_RETURN(in_tensor); @@ -77,4 +77,4 @@ NNACL_KERNEL(PrimitiveType_Unsqueeze, kNumberTypeInt32, NNACLOpt) NNACL_KERNEL(PrimitiveType_Unsqueeze, kNumberTypeInt64, NNACLOpt) NNACL_KERNEL(PrimitiveType_Unsqueeze, kNumberTypeBool, NNACLOpt) NNACL_KERNEL(PrimitiveType_Unsqueeze, kNumberTypeUInt8, NNACLOpt) -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reshape.h b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reshape.h index b75fb960..11f10c8b 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reshape.h +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_reshape.h @@ -20,7 +20,7 @@ #include #include "nnacl/nnacl_kernel.h" -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { class ReshapeKernel : public NNACLKernel { public: explicit ReshapeKernel(OpParameter *parameter, const std::vector &inputs, @@ -29,6 +29,6 @@ class ReshapeKernel : public NNACLKernel { ~ReshapeKernel() override = default; int Run() override; }; -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_NNACL_RESHAPE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_strided_slice.cc b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_strided_slice.cc index fb86fdc4..647275e5 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_strided_slice.cc +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_strided_slice.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_StridedSlice; -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { int StridedSliceKernel::Run() { StridedSliceStruct *strided_slice = reinterpret_cast(kernel_); CHECK_NULL_RETURN(strided_slice); @@ -42,4 +42,4 @@ NNACL_KERNEL(PrimitiveType_StridedSlice, kNumberTypeFloat16, NNACLOpt) NNACL_KERNEL(PrimitiveType_StridedSlice, kNumberTypeInt32, NNACLOpt) NNACL_KERNEL(PrimitiveType_StridedSlice, kNumberTypeInt8, NNACLOpt) -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_strided_slice.h b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_strided_slice.h index 2659752a..203782ef 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_strided_slice.h +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_strided_slice.h @@ -20,7 +20,7 @@ #include #include "nnacl/nnacl_kernel.h" -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { class StridedSliceKernel : public NNACLKernel { public: 
explicit StridedSliceKernel(OpParameter *parameter, const std::vector &inputs, @@ -29,6 +29,6 @@ class StridedSliceKernel : public NNACLKernel { ~StridedSliceKernel() override = default; int Run() override; }; -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_NNACL_STRIDED_SLICE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_where.cc b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_where.cc index 985ba206..61344353 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_where.cc +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_where.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Where; -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { int WhereKernel::PreProcess() { if (in_tensors_.size() == Num3) { return LiteKernel::PreProcess(); @@ -48,4 +48,4 @@ NNACL_KERNEL(PrimitiveType_Where, kNumberTypeBool, NNACLOpt) NNACL_KERNEL(PrimitiveType_Where, kNumberTypeInt32, NNACLOpt) NNACL_KERNEL(PrimitiveType_Where, kNumberTypeFloat16, NNACLOpt) NNACL_KERNEL(PrimitiveType_Where, kNumberTypeFloat32, NNACLOpt) -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl diff --git a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_where.h b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_where.h index 8a4709a6..3f039c8f 100644 --- a/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_where.h +++ b/mindspore-lite/src/litert/kernel/cpu/nnacl/nnacl_where.h @@ -20,7 +20,7 @@ #include #include "nnacl/nnacl_kernel.h" -namespace mindspore::nnacl { +namespace mindspore::lite::nnacl { class WhereKernel : public NNACLKernel { public: explicit WhereKernel(OpParameter *parameter, const std::vector &inputs, @@ -30,6 +30,6 @@ class WhereKernel : public NNACLKernel { int PreProcess() override; int Run() override; }; -} // namespace mindspore::nnacl +} // namespace mindspore::lite::nnacl #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_NNACL_WHERE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/string/extract_feature.cc b/mindspore-lite/src/litert/kernel/cpu/string/extract_feature.cc index ac232113..919e62e2 100644 --- a/mindspore-lite/src/litert/kernel/cpu/string/extract_feature.cc +++ b/mindspore-lite/src/litert/kernel/cpu/string/extract_feature.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_CustomExtractFeatures; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ExtractFeatureCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), C2NUM); @@ -97,4 +97,4 @@ kernel::LiteKernel *CpuExtractFeatureKernelCreator(const std::vector &inputs, @@ -35,6 +35,6 @@ class ExtractFeatureCPUKernel : public LiteKernel { private: bool IsInBlacklist(const lite::StringPack &str); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_STRING_EXTRACT_FEATURE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/string/hashtable_lookup.cc b/mindspore-lite/src/litert/kernel/cpu/string/hashtable_lookup.cc index 890f37c7..98c913c3 100644 --- a/mindspore-lite/src/litert/kernel/cpu/string/hashtable_lookup.cc +++ b/mindspore-lite/src/litert/kernel/cpu/string/hashtable_lookup.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_HashtableLookup; -namespace mindspore::kernel { +namespace 
mindspore::lite::kernel { int HashtableLookupCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C3NUM); CHECK_LESS_RETURN(out_tensors_.size(), C2NUM); @@ -99,4 +99,4 @@ kernel::LiteKernel *CpuHashtableLookupKernelCreator(const std::vector #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class HashtableLookupCPUKernel : public LiteKernel { public: HashtableLookupCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -31,6 +31,6 @@ class HashtableLookupCPUKernel : public LiteKernel { int ReSize() override; int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_STRING_HASHTABLE_LOOKUP_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/string/lsh_projection.cc b/mindspore-lite/src/litert/kernel/cpu/string/lsh_projection.cc index f3be2e3f..bea40801 100644 --- a/mindspore-lite/src/litert/kernel/cpu/string/lsh_projection.cc +++ b/mindspore-lite/src/litert/kernel/cpu/string/lsh_projection.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_LshProjection; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int LshProjectionCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), C2NUM); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -203,4 +203,4 @@ void LshProjectionCPUKernel::LshProjectionDense(const float *hashSeed, const int } REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LshProjection, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/string/lsh_projection.h b/mindspore-lite/src/litert/kernel/cpu/string/lsh_projection.h index 7f55b281..7f7e4df9 100644 --- a/mindspore-lite/src/litert/kernel/cpu/string/lsh_projection.h +++ b/mindspore-lite/src/litert/kernel/cpu/string/lsh_projection.h @@ -22,7 +22,7 @@ #include "nnacl_c/lsh_projection_parameter.h" #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class LshProjectionCPUKernel : public LiteKernel { public: LshProjectionCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -52,6 +52,6 @@ class LshProjectionCPUKernel : public LiteKernel { float *weight_ = nullptr; int32_t *output_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_STRING_LSH_PROJECTION_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/string/normalize.cc b/mindspore-lite/src/litert/kernel/cpu/string/normalize.cc index 38bc2c20..13f087f6 100644 --- a/mindspore-lite/src/litert/kernel/cpu/string/normalize.cc +++ b/mindspore-lite/src/litert/kernel/cpu/string/normalize.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_CustomNormalize; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { const char kPunctuationsRegex[] = "[.*()\"]"; const std::map kRegexTransforms = { @@ -150,4 +150,4 @@ kernel::LiteKernel *CpuNormalizeKernelCreator(const std::vector } REG_KERNEL(kCPU, kObjectTypeString, PrimitiveType_CustomNormalize, CpuNormalizeKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/string/normalize.h b/mindspore-lite/src/litert/kernel/cpu/string/normalize.h index ff7d473e..dce7cb95 100644 --- 
a/mindspore-lite/src/litert/kernel/cpu/string/normalize.h +++ b/mindspore-lite/src/litert/kernel/cpu/string/normalize.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "src/common/string_utils.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class NormalizeCPUKernel : public LiteKernel { public: NormalizeCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -40,6 +40,6 @@ class NormalizeCPUKernel : public LiteKernel { std::vector normalized_strs; void FreeBuffer(); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_STRING_NORMALIZE_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/string/predict.cc b/mindspore-lite/src/litert/kernel/cpu/string/predict.cc index a4b517d2..7bce3937 100644 --- a/mindspore-lite/src/litert/kernel/cpu/string/predict.cc +++ b/mindspore-lite/src/litert/kernel/cpu/string/predict.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_CustomPredict; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { constexpr int INPUT_INDEX = 0; constexpr int KEY_INDEX = 1; @@ -146,4 +146,4 @@ kernel::LiteKernel *CpuPredictKernelCreator(const std::vector &i } REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_CustomPredict, CpuPredictKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/string/predict.h b/mindspore-lite/src/litert/kernel/cpu/string/predict.h index 9f5f6fa4..9f55dcd0 100644 --- a/mindspore-lite/src/litert/kernel/cpu/string/predict.h +++ b/mindspore-lite/src/litert/kernel/cpu/string/predict.h @@ -20,7 +20,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl_c/predict_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class PredictCPUKernel : public LiteKernel { public: PredictCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -35,6 +35,6 @@ class PredictCPUKernel : public LiteKernel { private: std::vector GetLabelInfo(); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_STRING_PREDICT_H_ diff --git a/mindspore-lite/src/litert/kernel/cpu/string/skip_gram.cc b/mindspore-lite/src/litert/kernel/cpu/string/skip_gram.cc index 54e62ee0..a86711cd 100644 --- a/mindspore-lite/src/litert/kernel/cpu/string/skip_gram.cc +++ b/mindspore-lite/src/litert/kernel/cpu/string/skip_gram.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::StringPack; using mindspore::schema::PrimitiveType_SkipGram; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SkipGramCPUKernel::Prepare() { CHECK_LESS_RETURN(in_tensors_.size(), 1); CHECK_LESS_RETURN(out_tensors_.size(), 1); @@ -113,4 +113,4 @@ int SkipGramCPUKernel::Run() { } REG_KERNEL(kCPU, kObjectTypeString, PrimitiveType_SkipGram, LiteKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/cpu/string/skip_gram.h b/mindspore-lite/src/litert/kernel/cpu/string/skip_gram.h index 9b32619b..7bb676d8 100644 --- a/mindspore-lite/src/litert/kernel/cpu/string/skip_gram.h +++ b/mindspore-lite/src/litert/kernel/cpu/string/skip_gram.h @@ -22,7 +22,7 @@ #include "nnacl_c/skip_gram_parameter.h" #include "src/common/string_utils.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SkipGramCPUKernel : 
public LiteKernel { public: @@ -42,6 +42,6 @@ class SkipGramCPUKernel : public LiteKernel { SkipGramParameter *skip_gram_parameter_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_CPU_STRING_SKIP_GRAM_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/activation.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/activation.cc index 1ad3e7af..b094b7d1 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/activation.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/activation.cc @@ -37,7 +37,7 @@ using mindspore::schema::ActivationType_SWISH; using mindspore::schema::ActivationType_TANH; using mindspore::schema::PrimitiveType_Activation; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { std::string ActivationOpenCLKernel::GetActTypeString(int act_type) { static std::map supported_act_type = { {ActivationType_LEAKY_RELU, "LeakyRelu"}, {ActivationType_RELU, "Relu"}, {ActivationType_SIGMOID, "Sigmoid"}, @@ -149,4 +149,4 @@ int ActivationOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Activation, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Activation, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/activation.h b/mindspore-lite/src/litert/kernel/opencl/kernel/activation.h index dcec9c9b..40184e26 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/activation.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/activation.h @@ -23,7 +23,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/fp32/activation_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ActivationOpenCLKernel : public OpenCLKernel { public: ActivationOpenCLKernel(OpParameter *parameter, const std::vector &inputs, @@ -47,5 +47,5 @@ class ActivationOpenCLKernel : public OpenCLKernel { std::unique_ptr out_shape_ = nullptr; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_ACTIVATION_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/argminmax.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/argminmax.cc index da62eac0..ce898b3f 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/argminmax.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/argminmax.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_ArgMaxFusion; using mindspore::schema::PrimitiveType_ArgMinFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ArgMinMaxOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); @@ -245,4 +245,4 @@ REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ArgMinFusion, OpenCLKernelCre REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ArgMinFusion, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ArgMaxFusion, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ArgMaxFusion, OpenCLKernelCreator); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/argminmax.h b/mindspore-lite/src/litert/kernel/opencl/kernel/argminmax.h index 
0fdfe87c..cb63130a 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/argminmax.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/argminmax.h @@ -22,7 +22,7 @@ #include "nnacl_c/arg_min_max_parameter.h" #include "nnacl_c/kernel/arg_min_max.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ArgMinMaxOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -50,5 +50,5 @@ class ArgMinMaxOpenCLKernel : public OpenCLKernel { cl_int4 strides_; ArgMinMaxComputeParam compute_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic.cc index 5ffc86b0..95cd800e 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic.cc @@ -55,7 +55,7 @@ using mindspore::schema::PrimitiveType_NotEqual; using mindspore::schema::PrimitiveType_SquaredDifference; using mindspore::schema::PrimitiveType_SubFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ArithmeticOpenCLKernel::CheckSpecsWithoutShape() { for (auto &tensor : in_tensors_) { if (tensor->data_type() != kNumberTypeFloat32 && tensor->data_type() != kNumberTypeFloat16) { @@ -365,4 +365,4 @@ REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Greater, OpenCLKernelCreator< REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_GreaterEqual, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Eltwise, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_BiasAdd, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic.h b/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic.h index cccd9ebd..3a5c6152 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic.h @@ -24,7 +24,7 @@ #include #include "src/litert/kernel/opencl/opencl_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { extern std::set SupportedOpenCLArithmetics; class ArithmeticOpenCLKernel : public OpenCLKernel { @@ -52,6 +52,6 @@ class ArithmeticOpenCLKernel : public OpenCLKernel { std::vector weight_ptrs_; std::string kernel_name_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_ARITHMETIC_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic_self.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic_self.cc index d49e61c5..d8810c00 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic_self.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic_self.cc @@ -26,7 +26,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ArithmeticSelfOpenCLKernel::CheckSpecsWithoutShape() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); @@ -160,4 +160,4 @@ REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Sin, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Sqrt, OpenCLKernelCreator) REG_KERNEL(kGPU, 
kNumberTypeFloat16, PrimitiveType_Square, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic_self.h b/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic_self.h index 68c28468..f1ea0ced 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic_self.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/arithmetic_self.h @@ -37,7 +37,7 @@ using mindspore::schema::PrimitiveType_Sin; using mindspore::schema::PrimitiveType_Sqrt; using mindspore::schema::PrimitiveType_Square; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ArithmeticSelfOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -63,5 +63,5 @@ class ArithmeticSelfOpenCLKernel : public OpenCLKernel { cl_int4 output_shape_ = {}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/batch_to_space_nd.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/batch_to_space_nd.cc index 88a86ef8..01404fd2 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/batch_to_space_nd.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/batch_to_space_nd.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_BatchToSpace; using mindspore::schema::PrimitiveType_BatchToSpaceND; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int BatchToSpaceNDOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); @@ -140,4 +140,4 @@ REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_BatchToSpaceND, OpenCLKernelC REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_BatchToSpaceND, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_BatchToSpace, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_BatchToSpace, OpenCLKernelCreator); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/batch_to_space_nd.h b/mindspore-lite/src/litert/kernel/opencl/kernel/batch_to_space_nd.h index a8e6049f..18ef7ced 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/batch_to_space_nd.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/batch_to_space_nd.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/batch_to_space_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class BatchToSpaceNDOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -38,5 +38,5 @@ class BatchToSpaceNDOpenCLKernel : public OpenCLKernel { private: }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/batchnorm.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/batchnorm.cc index 6f240aeb..83de5a87 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/batchnorm.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/batchnorm.cc @@ -35,7 +35,7 @@ constexpr int kNumInput2 = 2; constexpr int kNumInput3 = 3; constexpr int kNumInput4 = 4; } // namespace -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int BatchNormOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_5 || 
out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); @@ -402,4 +402,4 @@ int BatchNormOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_BatchNorm, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_BatchNorm, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/batchnorm.h b/mindspore-lite/src/litert/kernel/opencl/kernel/batchnorm.h index 94f38404..930d57ea 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/batchnorm.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/batchnorm.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/fp32/batchnorm_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class BatchNormOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -49,5 +49,5 @@ class BatchNormOpenCLKernel : public OpenCLKernel { cl::Kernel kernel_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/cast.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/cast.cc index 77ddafd4..8f5dddf8 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/cast.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/cast.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Cast; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { const std::map dtype_names = { {kNumberTypeFloat32, "fp32"}, @@ -119,4 +119,4 @@ int CastOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Cast, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Cast, OpenCLKernelCreator); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/cast.h b/mindspore-lite/src/litert/kernel/opencl/kernel/cast.h index 094f067c..84a10838 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/cast.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/cast.h @@ -21,7 +21,7 @@ #include #include "src/litert/kernel/opencl/opencl_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CastOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -38,5 +38,5 @@ class CastOpenCLKernel : public OpenCLKernel { private: GpuTensorInfo shape_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/concat.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/concat.cc index bc9dcc62..5ee0452b 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/concat.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/concat.cc @@ -31,7 +31,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::opencl::ImageSize; using mindspore::schema::PrimitiveType_Concat; -namespace mindspore { +namespace mindspore::lite { namespace kernel { int ConcatOpenCLKernel::RunAxis0() { auto allocator_ = ocl_runtime_->GetAllocator(); @@ -403,4 +403,4 @@ REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Concat, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeInt32, PrimitiveType_Concat, OpenCLKernelCreator) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/src/litert/kernel/opencl/kernel/concat.h b/mindspore-lite/src/litert/kernel/opencl/kernel/concat.h index 15094387..a3c6df92 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/concat.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/concat.h @@ -22,7 +22,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/concat_parameter.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class ConcatOpenCLKernel : public OpenCLKernel { public: @@ -62,5 +62,5 @@ class ConcatOpenCLKernel : public OpenCLKernel { }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/conv2d.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/conv2d.cc index 4b66a494..f1de812c 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/conv2d.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/conv2d.cc @@ -40,7 +40,7 @@ using mindspore::schema::ActivationType_TANH; using mindspore::schema::PrimitiveType_Conv2DFusion; using mindspore::schema::PrimitiveType_FullConnection; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int Conv2DOpenCLKernel::CheckSpecsWithoutShape() { if (!IsFilterConst()) { MS_LOG(WARNING) << "Conv2D doesn't support non-constant filter yet"; @@ -810,4 +810,4 @@ kernel::LiteKernel *OpenCLConv2DCreator(const std::vector &input REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Conv2DFusion, OpenCLConv2DCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Conv2DFusion, OpenCLConv2DCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/conv2d.h b/mindspore-lite/src/litert/kernel/opencl/kernel/conv2d.h index 4ad08678..50961e68 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/conv2d.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/conv2d.h @@ -25,7 +25,7 @@ #include "nnacl_c/conv_parameter.h" #include "schema/ops_generated.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { using lite::opencl::MemType; constexpr size_t CI_TILE = C4NUM; @@ -118,6 +118,6 @@ class Conv2DOpenCLKernel : public OpenCLKernel { } block_size_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_CONV2D_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/conv2d_transpose.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/conv2d_transpose.cc index e8efae25..f7d07c0c 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/conv2d_transpose.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/conv2d_transpose.cc @@ -31,7 +31,7 @@ using mindspore::schema::ActivationType_RELU; using mindspore::schema::ActivationType_RELU6; using mindspore::schema::PrimitiveType_Conv2dTransposeFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int Conv2dTransposeOpenCLKernel::CheckSpecs() { if ((in_tensors_.size() != INPUT_TENSOR_SIZE_2 && in_tensors_.size() != INPUT_TENSOR_SIZE_3) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { @@ -366,4 +366,4 @@ kernel::LiteKernel *OpenCLConv2dTransposeCreator(const std::vector CROP_SUPPORT_DTYPES = { {kNumberTypeFloat32, "fp32"}, @@ -157,4 +157,4 @@ int CropOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Crop, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Crop, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeInt32, 
PrimitiveType_Crop, OpenCLKernelCreator); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/crop.h b/mindspore-lite/src/litert/kernel/opencl/kernel/crop.h index 117ee024..3f8f9dc3 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/crop.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/crop.h @@ -22,7 +22,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/crop_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class CropOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -45,5 +45,5 @@ class CropOpenCLKernel : public OpenCLKernel { GpuTensorInfo out_gpu_info_ = {}; int offset_[COMM_SHAPE_SIZE] = {0}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/depthwise_conv2d.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/depthwise_conv2d.cc index 6b923fb7..aeb4b97b 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/depthwise_conv2d.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/depthwise_conv2d.cc @@ -34,7 +34,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::opencl::ImageSize; using mindspore::lite::opencl::MemType; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int DepthwiseConv2dOpenCLKernel::CheckSpecs() { auto ret = InputOutputCheckSpecs(); if (ret != RET_OK) { @@ -461,4 +461,4 @@ int DepthwiseConv2dOpenCLKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/depthwise_conv2d.h b/mindspore-lite/src/litert/kernel/opencl/kernel/depthwise_conv2d.h index 909f61d8..1d8d05f6 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/depthwise_conv2d.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/depthwise_conv2d.h @@ -23,7 +23,7 @@ using mindspore::lite::opencl::MemType; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class DepthwiseConv2dOpenCLKernel : public OpenCLKernel { public: DepthwiseConv2dOpenCLKernel(OpParameter *parameter, const std::vector &inputs, @@ -60,6 +60,6 @@ class DepthwiseConv2dOpenCLKernel : public OpenCLKernel { } block_size_; MemType filter_type_{MemType::BUF}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_DEPTHWISE_CONV2D_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/fill.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/fill.cc index 28229d3d..6ac0c85c 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/fill.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/fill.cc @@ -26,7 +26,7 @@ using mindspore::lite::opencl::ImageSize; using mindspore::schema::PrimitiveType_Fill; using mindspore::schema::PrimitiveType_Shape; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int FillOpenCLKernel::RunFill() { auto allocator_ = ocl_runtime_->GetAllocator(); ImageSize img_size; @@ -116,4 +116,4 @@ REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Fill, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Fill, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Shape, OpenCLKernelCreator); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/fill.h 
b/mindspore-lite/src/litert/kernel/opencl/kernel/fill.h index f81873a6..710b5610 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/fill.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/fill.h @@ -21,7 +21,7 @@ #include "nnacl_c/base/fill_base.h" #include "src/litert/kernel/opencl/opencl_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class FillOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -42,5 +42,5 @@ class FillOpenCLKernel : public OpenCLKernel { int default_{0}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/fullconnection.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/fullconnection.cc index 9f61a966..384adc41 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/fullconnection.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/fullconnection.cc @@ -33,7 +33,7 @@ using mindspore::schema::ActivationType_RELU6; using mindspore::schema::ActivationType_TANH; using mindspore::schema::PrimitiveType_FullConnection; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int FullConnectionOpenCLKernel::CheckSpecs() { if ((in_tensors_.size() != INPUT_TENSOR_SIZE_2 && in_tensors_.size() != INPUT_TENSOR_SIZE_3) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { @@ -440,4 +440,4 @@ int FullConnectionOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_FullConnection, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_FullConnection, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/fullconnection.h b/mindspore-lite/src/litert/kernel/opencl/kernel/fullconnection.h index da851b05..4d436acd 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/fullconnection.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/fullconnection.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/matmul_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class FullConnectionOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -51,6 +51,6 @@ class FullConnectionOpenCLKernel : public OpenCLKernel { int CI_remainder_{1}; int CO_{1}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_FULLCONNECTION_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/fusion_eltwise.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/fusion_eltwise.cc index 386796c7..a3b3c678 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/fusion_eltwise.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/fusion_eltwise.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr int LOG_PREFIX_SCALE = 2; static std::set SupportedOperators = { // Arithmetic Primitive @@ -622,4 +622,4 @@ int FusionEltwiseOpenCLKernel::GetTensorIdx(lite::Tensor *in_tensor) { } return 0; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/fusion_eltwise.h b/mindspore-lite/src/litert/kernel/opencl/kernel/fusion_eltwise.h index 54900401..a241c8f6 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/fusion_eltwise.h +++ 
b/mindspore-lite/src/litert/kernel/opencl/kernel/fusion_eltwise.h @@ -33,7 +33,7 @@ using mindspore::schema::ActivationType; using mindspore::schema::PrimitiveType; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr schema::PrimitiveType PrimitiveType_FusionEltwise = static_cast(-100); enum EltwiseOperator { @@ -209,6 +209,6 @@ class FusionEltwiseOpenCLKernel : public OpenCLKernel { std::vector buffer_weights_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_FUSION_ELTWISE_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/gather.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/gather.cc index 67e8a7ee..80b1ce8b 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/gather.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/gather.cc @@ -30,7 +30,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::opencl::ImageSize; using mindspore::schema::PrimitiveType_Gather; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int GatherOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_3) { MS_LOG(WARNING) << "GatherOpenCLKernel only supports 3 input Tensor but get " << in_tensors_.size(); @@ -364,4 +364,4 @@ int GatherOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Gather, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Gather, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeInt32, PrimitiveType_Gather, OpenCLKernelCreator); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/gather.h b/mindspore-lite/src/litert/kernel/opencl/kernel/gather.h index 98da3725..d8a90eea 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/gather.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/gather.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/gather_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class GatherOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -49,5 +49,5 @@ class GatherOpenCLKernel : public OpenCLKernel { bool is_indices_tensor_const_{true}; bool is_fp16_enabled_{false}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/gl_to_cl.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/gl_to_cl.cc index fc260f40..960e9fff 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/gl_to_cl.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/gl_to_cl.cc @@ -20,7 +20,7 @@ #include "src/litert/kernel_registry.h" #include "src/litert/kernel/opencl/cl/gl_to_cl.cl.inc" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { const cl_GLenum kGlTexture2D = 0x0DE1; int GLToCLOpenCLKernel::CheckSpecs() { return RET_OK; } @@ -149,4 +149,4 @@ int GLToCLOpenCLKernel::InferShape() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/gl_to_cl.h b/mindspore-lite/src/litert/kernel/opencl/kernel/gl_to_cl.h index 27fe4a42..bbbb3962 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/gl_to_cl.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/gl_to_cl.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "src/litert/kernel/opencl/opencl_kernel.h" -namespace 
mindspore::kernel { +namespace mindspore::lite::kernel { class GLToCLOpenCLKernel : public OpenCLKernel { public: GLToCLOpenCLKernel(OpParameter *parameter, const std::vector &inputs, @@ -48,6 +48,6 @@ class GLToCLOpenCLKernel : public OpenCLKernel { size_t W_{1}; size_t C_{1}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_GL_TO_CL_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/int8/arithmetic_int8.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/int8/arithmetic_int8.cc index 48cc00ef..d9345d22 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/int8/arithmetic_int8.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/int8/arithmetic_int8.cc @@ -42,7 +42,7 @@ using mindspore::schema::PrimitiveType_Eltwise; using mindspore::schema::PrimitiveType_MulFusion; using mindspore::schema::PrimitiveType_SubFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ArithmeticInt8OpenCLKernel::CheckSpecs() { for (auto &tensor : in_tensors_) { if (tensor->data_type() != kNumberTypeInt8) { @@ -282,4 +282,4 @@ int ArithmeticInt8OpenCLKernel::Run() { } REG_KERNEL(kGPU, kNumberTypeInt8, PrimitiveType_AddFusion, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/int8/arithmetic_int8.h b/mindspore-lite/src/litert/kernel/opencl/kernel/int8/arithmetic_int8.h index 488be367..fee65e73 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/int8/arithmetic_int8.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/int8/arithmetic_int8.h @@ -22,7 +22,7 @@ #include #include "src/litert/kernel/opencl/opencl_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ArithmeticInt8OpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -45,6 +45,6 @@ class ArithmeticInt8OpenCLKernel : public OpenCLKernel { std::vector weight_ptrs_; std::string kernel_name_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_INT8_ARITHMETIC_INT8_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/layer_norm.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/layer_norm.cc index c7f4cfd4..9baf440f 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/layer_norm.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/layer_norm.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_LayerNormFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int LayerNormOpenCLKernel::CheckSpecs() { auto param = reinterpret_cast(this->op_parameter_); CHECK_NULL_RETURN(param); @@ -371,4 +371,4 @@ int LayerNormOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_LayerNormFusion, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_LayerNormFusion, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/layer_norm.h b/mindspore-lite/src/litert/kernel/opencl/kernel/layer_norm.h index 98839e56..3a7d43ee 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/layer_norm.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/layer_norm.h @@ -20,7 +20,7 @@ #include #include "src/litert/kernel/opencl/opencl_kernel.h" -namespace 
mindspore::kernel { +namespace mindspore::lite::kernel { class LayerNormOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -53,5 +53,5 @@ class LayerNormOpenCLKernel : public OpenCLKernel { cl::Kernel kernel_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/matmul.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/matmul.cc index 7f2e7a39..24abf240 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/matmul.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/matmul.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_MatMulFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { bool IsUseStrassenMatmul(const std::vector &in_tensors_) { if (in_tensors_.at(0)->shape().size() == DIMENSION_2D) { auto shape0 = in_tensors_.at(0)->shape(); @@ -470,4 +470,4 @@ kernel::LiteKernel *OpenCLMatMulKernelCreator(const std::vector REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_MatMulFusion, OpenCLMatMulKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_MatMulFusion, OpenCLMatMulKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/matmul.h b/mindspore-lite/src/litert/kernel/opencl/kernel/matmul.h index 3981a8c1..f60acd66 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/matmul.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/matmul.h @@ -22,7 +22,7 @@ #include "src/common/utils.h" #include "nnacl_c/matmul_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MatMulOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -56,6 +56,6 @@ class MatMulOpenCLKernel : public OpenCLKernel { private: int PadWeight(std::vector weight_shape_4d, int ci, int co); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_MATMUL_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/one_hot.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/one_hot.cc index 561ccba9..cb376991 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/one_hot.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/one_hot.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_OneHot; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int OneHotOpenCLKernel::CheckSpecs() { if ((in_tensors_.size() < INPUT_TENSOR_SIZE_2 || in_tensors_.size() > INPUT_TENSOR_SIZE_4) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { @@ -153,4 +153,4 @@ int OneHotOpenCLKernel::Run() { } REG_KERNEL(kGPU, kNumberTypeInt32, PrimitiveType_OneHot, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/one_hot.h b/mindspore-lite/src/litert/kernel/opencl/kernel/one_hot.h index dcec9932..bea319b5 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/one_hot.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/one_hot.h @@ -23,7 +23,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/fp32/one_hot_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class OneHotOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; 
@@ -46,6 +46,6 @@ class OneHotOpenCLKernel : public OpenCLKernel { GpuTensorInfo out_shape_; OneHotParameter *param_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_ONE_HOT_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/pad.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/pad.cc index 35478115..8f7799d9 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/pad.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/pad.cc @@ -30,7 +30,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PaddingMode_CONSTANT; using mindspore::schema::PrimitiveType_PadFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int PadOpenCLKernel::CheckSpecs() { auto param = reinterpret_cast(op_parameter_); MS_ASSERT(param); @@ -156,4 +156,4 @@ int PadOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_PadFusion, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_PadFusion, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/pad.h b/mindspore-lite/src/litert/kernel/opencl/kernel/pad.h index 253a9adf..7533654b 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/pad.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/pad.h @@ -24,7 +24,7 @@ #include "schema/model_generated.h" #include "nnacl_c/pad_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class PadOpenCLKernel : public OpenCLKernel { public: PadOpenCLKernel(OpParameter *parameter, const std::vector &inputs, @@ -42,6 +42,6 @@ class PadOpenCLKernel : public OpenCLKernel { private: PadParameter *param_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_PAD_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/pooling2d.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/pooling2d.cc index e56858b4..48f6df9a 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/pooling2d.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/pooling2d.cc @@ -31,7 +31,7 @@ using mindspore::lite::opencl::MemType; using mindspore::schema::PrimitiveType_AvgPoolFusion; using mindspore::schema::PrimitiveType_MaxPoolFusion; -namespace mindspore { +namespace mindspore::lite { namespace kernel { int PoolingOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { @@ -208,4 +208,4 @@ REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_MaxPoolFusion, OpenCLKernelCr REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_AvgPoolFusion, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_MaxPoolFusion, OpenCLKernelCreator) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/pooling2d.h b/mindspore-lite/src/litert/kernel/opencl/kernel/pooling2d.h index 5e6ef777..4a8c8132 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/pooling2d.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/pooling2d.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/fp32/pooling_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class PoolingOpenCLKernel : public OpenCLKernel { public: PoolingOpenCLKernel(OpParameter *parameter, const std::vector 
&inputs, @@ -48,6 +48,6 @@ class PoolingOpenCLKernel : public OpenCLKernel { GpuTensorInfo input_tensor_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_POOLING2D_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/power.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/power.cc index 6e925502..21f60655 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/power.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/power.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_PowFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int PowerOpenCLKernel::CheckSpecs() { if ((in_tensors_.size() != INPUT_TENSOR_SIZE_1 && in_tensors_.size() != INPUT_TENSOR_SIZE_2) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { @@ -192,4 +192,4 @@ int PowerOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_PowFusion, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_PowFusion, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/power.h b/mindspore-lite/src/litert/kernel/opencl/kernel/power.h index 33f02aac..fd196424 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/power.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/power.h @@ -21,7 +21,7 @@ #include "nnacl_c/fp32/power_fp32.h" #include "src/litert/kernel/opencl/opencl_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class PowerOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -43,5 +43,5 @@ class PowerOpenCLKernel : public OpenCLKernel { float shift_{1.0}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/prelu.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/prelu.cc index 8c319140..c1ff2e19 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/prelu.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/prelu.cc @@ -31,7 +31,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_PReLUFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { #ifdef ENABLE_FP16 int PReluOpenCLKernel::InitWeights() { auto allocator = ocl_runtime_->GetAllocator(); @@ -232,4 +232,4 @@ int PReluOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_PReLUFusion, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_PReLUFusion, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/prelu.h b/mindspore-lite/src/litert/kernel/opencl/kernel/prelu.h index 420fde9c..aaaacbf8 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/prelu.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/prelu.h @@ -23,7 +23,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "schema/model_generated.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class PReluOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -48,6 +48,6 @@ class PReluOpenCLKernel : public OpenCLKernel { bool weight_is_scalar{false}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // 
MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_PRELU_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/reduce.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/reduce.cc index 9943a64e..27b46e8d 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/reduce.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/reduce.cc @@ -39,7 +39,7 @@ using mindspore::schema::ReduceMode_ReduceProd; using mindspore::schema::ReduceMode_ReduceSum; using mindspore::schema::ReduceMode_ReduceSumSquare; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { std::string ReduceOpenCLKernel::GetReduceTypeStr(int type) { static const std::map reduce_type2str{ {ReduceMode_ReduceMean, "Mean"}, {ReduceMode_ReduceSum, "Sum"}, {ReduceMode_ReduceMin, "Min"}, @@ -340,4 +340,4 @@ int ReduceOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ReduceFusion, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ReduceFusion, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/reduce.h b/mindspore-lite/src/litert/kernel/opencl/kernel/reduce.h index f66d28b0..3e26057b 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/reduce.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/reduce.h @@ -23,7 +23,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/reduce_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ReduceOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -57,6 +57,6 @@ class ReduceOpenCLKernel : public OpenCLKernel { static const size_t LOCAL_CACHE_THREAD{16}; int axes_[MAX_SHAPE_SIZE]; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_REDUCE_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/reshape.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/reshape.cc index bf91e608..a20fd14f 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/reshape.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/reshape.cc @@ -30,7 +30,7 @@ using mindspore::schema::PrimitiveType_Reshape; using mindspore::schema::PrimitiveType_Squeeze; using mindspore::schema::PrimitiveType_Unsqueeze; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ReshapeOpenCLKernel::CheckSpecs() { if ((in_tensors_.size() != INPUT_TENSOR_SIZE_1 && in_tensors_.size() != INPUT_TENSOR_SIZE_2) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { @@ -150,4 +150,4 @@ REG_KERNEL(kGPU, kNumberTypeInt32, PrimitiveType_Unsqueeze, OpenCLKernelCreator< REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ExpandDims, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ExpandDims, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeInt32, PrimitiveType_ExpandDims, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/reshape.h b/mindspore-lite/src/litert/kernel/opencl/kernel/reshape.h index 7690af40..a9752a97 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/reshape.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/reshape.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "src/litert/kernel/opencl/opencl_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ReshapeOpenCLKernel : public OpenCLKernel { public: 
using OpenCLKernel::OpenCLKernel; @@ -34,6 +34,6 @@ class ReshapeOpenCLKernel : public OpenCLKernel { int SetGlobalLocal() override; int PreProcess() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_RESHAPE_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/resize.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/resize.cc index 9315729b..dfa8dcd0 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/resize.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/resize.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::RET_PARAM_INVALID; using mindspore::schema::PrimitiveType_Resize; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ResizeOpenCLKernel::CheckSpecs() { if (!(in_tensors_.size() == INPUT_TENSOR_SIZE_1 || in_tensors_.size() == INPUT_TENSOR_SIZE_2) || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { @@ -164,4 +164,4 @@ int ResizeOpenCLKernel::PreProcess() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Resize, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Resize, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/resize.h b/mindspore-lite/src/litert/kernel/opencl/kernel/resize.h index 8f7d9143..e4d69d49 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/resize.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/resize.h @@ -22,7 +22,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/resize_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ResizeOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -41,6 +41,6 @@ class ResizeOpenCLKernel : public OpenCLKernel { bool alignCorner{false}; bool preserveAspectRatio{false}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_RESIZE_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/scale.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/scale.cc index 9fdfa3f7..1678131c 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/scale.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/scale.cc @@ -32,7 +32,7 @@ using mindspore::lite::opencl::ImageSize; using mindspore::lite::opencl::MemType; using mindspore::schema::PrimitiveType_ScaleFusion; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { const int INPUT_DATA = 0; const int INPUT_SCALE = 1; @@ -321,4 +321,4 @@ int ScaleOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ScaleFusion, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ScaleFusion, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/scale.h b/mindspore-lite/src/litert/kernel/opencl/kernel/scale.h index 09a70acf..30491157 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/scale.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/scale.h @@ -21,7 +21,7 @@ #include "nnacl_c/scale_parameter.h" #include "src/litert/kernel/opencl/opencl_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ScaleOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -45,6 +45,6 @@ class ScaleOpenCLKernel : public 
OpenCLKernel { std::vector local_size_; std::vector global_size_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_SCALE_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/softmax.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/softmax.cc index e15effb7..09c4dbc5 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/softmax.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/softmax.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Softmax; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { const int SOFTMAX1x1_32_MAX_C4_NUM = 8; } @@ -187,4 +187,4 @@ int SoftmaxOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Softmax, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Softmax, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/softmax.h b/mindspore-lite/src/litert/kernel/opencl/kernel/softmax.h index 22f598da..19ddb69d 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/softmax.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/softmax.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/fp32/softmax_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SoftmaxOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -44,6 +44,6 @@ class SoftmaxOpenCLKernel : public OpenCLKernel { GpuTensorInfo out_shape_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_SOFTMAX_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_batch_nd.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_batch_nd.cc index 73874e77..95096779 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_batch_nd.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_batch_nd.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SpaceToBatch; using mindspore::schema::PrimitiveType_SpaceToBatchND; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SpaceToBatchNDOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); @@ -147,4 +147,4 @@ REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_SpaceToBatchND, OpenCLKernelC REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_SpaceToBatchND, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_SpaceToBatch, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_SpaceToBatch, OpenCLKernelCreator); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_batch_nd.h b/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_batch_nd.h index bde3a88b..69e62976 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_batch_nd.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_batch_nd.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/fp32/space_to_batch_fp32.h" -namespace mindspore::kernel { 
+namespace mindspore::lite::kernel { class SpaceToBatchNDOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -37,5 +37,5 @@ class SpaceToBatchNDOpenCLKernel : public OpenCLKernel { private: }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_depth.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_depth.cc index ebfc9742..d8d04a57 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_depth.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_depth.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_PARAM_INVALID; using mindspore::schema::PrimitiveType_DepthToSpace; using mindspore::schema::PrimitiveType_SpaceToDepth; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SpaceToDepthOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); @@ -135,4 +135,4 @@ REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_SpaceToDepth, OpenCLKernelCre REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_SpaceToDepth, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_DepthToSpace, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_DepthToSpace, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_depth.h b/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_depth.h index e00c5177..f32d3d17 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_depth.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/space_to_depth.h @@ -23,7 +23,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/space_to_depth_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SpaceToDepthOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -39,6 +39,6 @@ class SpaceToDepthOpenCLKernel : public OpenCLKernel { GpuTensorInfo in_shape_; GpuTensorInfo out_shape_; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_SPACE_TO_DEPTH_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/sparse_to_dense.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/sparse_to_dense.cc index 9e790866..f426933b 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/sparse_to_dense.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/sparse_to_dense.cc @@ -30,7 +30,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::opencl::ImageSize; using mindspore::schema::PrimitiveType_SparseToDense; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SparseToDenseOpenCLKernel::InitOutputToDefault() { auto allocator_ = ocl_runtime_->GetAllocator(); ImageSize img_size; @@ -331,4 +331,4 @@ int SparseToDenseOpenCLKernel::Run() { } REG_KERNEL(kGPU, kNumberTypeInt32, PrimitiveType_SparseToDense, OpenCLKernelCreator); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/sparse_to_dense.h b/mindspore-lite/src/litert/kernel/opencl/kernel/sparse_to_dense.h index 306411b7..8de44529 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/sparse_to_dense.h +++ 
b/mindspore-lite/src/litert/kernel/opencl/kernel/sparse_to_dense.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/fp32/sparse_to_dense_fp32.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SparseToDenseOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -59,5 +59,5 @@ class SparseToDenseOpenCLKernel : public OpenCLKernel { cl_int out_w_{1}; cl_int out_c_{1}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/split.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/split.cc index c92c4869..47b9a43c 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/split.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/split.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::opencl::ImageSize; using mindspore::schema::PrimitiveType_Split; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int SplitOpenCLKernel::RunAxis0() { auto allocator_ = ocl_runtime_->GetAllocator(); auto src_data = in_tensors_[0]->data(); @@ -276,4 +276,4 @@ int SplitOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Split, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Split, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/split.h b/mindspore-lite/src/litert/kernel/opencl/kernel/split.h index 80ebb4e1..0ef4f069 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/split.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/split.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/split_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class SplitOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -53,5 +53,5 @@ class SplitOpenCLKernel : public OpenCLKernel { uint32_t OC = {1}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/stack.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/stack.cc index f7d35f29..7e865c0e 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/stack.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/stack.cc @@ -27,7 +27,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::opencl::ImageSize; using mindspore::schema::PrimitiveType_Stack; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int StackOpenCLKernel::RunAxis0() { auto allocator_ = ocl_runtime_->GetAllocator(); ImageSize img_size; @@ -242,4 +242,4 @@ int StackOpenCLKernel::Run() { } REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Stack, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Stack, OpenCLKernelCreator); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/stack.h b/mindspore-lite/src/litert/kernel/opencl/kernel/stack.h index 9e7f1be4..23a86331 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/stack.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/stack.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/stack_parameter.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class StackOpenCLKernel : public OpenCLKernel { public: 
using OpenCLKernel::OpenCLKernel; @@ -50,5 +50,5 @@ class StackOpenCLKernel : public OpenCLKernel { cl_int4 out_shape_ = {}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/strassen.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/strassen.cc index aca2cbb5..2b9eca3c 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/strassen.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/strassen.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::opencl::ImageSize; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { const int half = 2; } @@ -518,4 +518,4 @@ int StrassenOpenCLKernel::Run() { threshold); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/strassen.h b/mindspore-lite/src/litert/kernel/opencl/kernel/strassen.h index e0282af8..47ce3a7f 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/strassen.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/strassen.h @@ -23,7 +23,7 @@ #define MAXDEPTH 5 -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class StrassenOpenCLKernel : public MatMulOpenCLKernel { public: using MatMulOpenCLKernel::MatMulOpenCLKernel; @@ -70,6 +70,6 @@ class StrassenOpenCLKernel : public MatMulOpenCLKernel { void *B_temp[MAXDEPTH] = {nullptr}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_WINOGRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/strided_slice.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/strided_slice.cc index d9d04dd9..007f3ba1 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/strided_slice.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/strided_slice.cc @@ -29,7 +29,7 @@ using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_SliceFusion; using mindspore::schema::PrimitiveType_StridedSlice; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { const int INPUT_BEGIN = 1; const int INPUT_END = 2; @@ -276,4 +276,4 @@ REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_SliceFusion, OpenCLKernelCrea REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_SliceFusion, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_StridedSlice, OpenCLKernelCreator); REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_StridedSlice, OpenCLKernelCreator); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/strided_slice.h b/mindspore-lite/src/litert/kernel/opencl/kernel/strided_slice.h index 129de4fa..f9990806 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/strided_slice.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/strided_slice.h @@ -21,7 +21,7 @@ #include "src/litert/kernel/opencl/opencl_kernel.h" #include "nnacl_c/base/slice_base.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class StridedSliceOpenCLKernel : public OpenCLKernel { public: using OpenCLKernel::OpenCLKernel; @@ -47,5 +47,5 @@ class StridedSliceOpenCLKernel : public OpenCLKernel { cl_int4 stride_{{1, 1, 1, 1}}; cl_int4 size_{}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/to_format.cc 
b/mindspore-lite/src/litert/kernel/opencl/kernel/to_format.cc index 5173cb38..b0d0e648 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/to_format.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/to_format.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::opencl::MemType; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int ToFormatOpenCLKernel::CheckSpecsWithoutShape() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_1 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { MS_LOG(WARNING) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); @@ -140,4 +140,4 @@ int ToFormatOpenCLKernel::InferShape() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/to_format.h b/mindspore-lite/src/litert/kernel/opencl/kernel/to_format.h index e1020ede..04ff147f 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/to_format.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/to_format.h @@ -21,7 +21,7 @@ #include "src/litert/lite_kernel.h" #include "src/litert/kernel/opencl/opencl_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class ToFormatOpenCLKernel : public OpenCLKernel { public: ToFormatOpenCLKernel(OpParameter *parameter, const std::vector &inputs, @@ -47,6 +47,6 @@ class ToFormatOpenCLKernel : public OpenCLKernel { size_t W_{1}; size_t C_{1}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_RESHAPE_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/transpose.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/transpose.cc index 634cae92..67d7683f 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/transpose.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/transpose.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Transpose; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int TransposeOpenCLKernel::CheckSpecs() { if (in_tensors_.size() != INPUT_TENSOR_SIZE_2 || out_tensors_.size() != OUTPUT_TENSOR_SIZE_1) { MS_LOG(WARNING) << "Transpose input output size unsupported."; @@ -200,4 +200,4 @@ int TransposeOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Transpose, OpenCLKernelCreator) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Transpose, OpenCLKernelCreator) -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/transpose.h b/mindspore-lite/src/litert/kernel/opencl/kernel/transpose.h index f7cc7c06..d53d9e1d 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/transpose.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/transpose.h @@ -22,7 +22,7 @@ #include "nnacl_c/transpose_parameter.h" #include "src/litert/kernel/opencl/opencl_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { enum class TransposeType { AXIS0312, AXIS0231, GENERAL }; class TransposeOpenCLKernel : public OpenCLKernel { @@ -42,6 +42,6 @@ class TransposeOpenCLKernel : public OpenCLKernel { GpuTensorInfo tensor_size_; int perm_4d_[4]; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_TRANSPOSE_H_ diff --git 
a/mindspore-lite/src/litert/kernel/opencl/kernel/winograd.cc b/mindspore-lite/src/litert/kernel/opencl/kernel/winograd.cc index b947c2c6..a2e49b30 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/winograd.cc +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/winograd.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { void Align(const std::vector &global, const std::vector &local, cl::NDRange *global_range, cl::NDRange *local_range) { @@ -366,4 +366,4 @@ double WinogradOpenCLKernel::GetProfilingTimeMs() { time_ns += time_end - time_start; return static_cast(time_ns) * 1e-6; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/kernel/winograd.h b/mindspore-lite/src/litert/kernel/opencl/kernel/winograd.h index bcceb63f..ea645de7 100644 --- a/mindspore-lite/src/litert/kernel/opencl/kernel/winograd.h +++ b/mindspore-lite/src/litert/kernel/opencl/kernel/winograd.h @@ -21,7 +21,7 @@ #include #include "src/litert/kernel/opencl/kernel/conv2d.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class WinogradOpenCLKernel : public Conv2DOpenCLKernel { public: WinogradOpenCLKernel(OpParameter *parameter, const std::vector &inputs, @@ -55,6 +55,6 @@ class WinogradOpenCLKernel : public Conv2DOpenCLKernel { void *winograd_mem1_{nullptr}; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_KERNEL_WINOGRAD_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/opencl_fusion.cc b/mindspore-lite/src/litert/kernel/opencl/opencl_fusion.cc index b4e3ae20..f465d9d0 100644 --- a/mindspore-lite/src/litert/kernel/opencl/opencl_fusion.cc +++ b/mindspore-lite/src/litert/kernel/opencl/opencl_fusion.cc @@ -47,7 +47,7 @@ using mindspore::schema::PrimitiveType_Activation; using mindspore::schema::PrimitiveType_Eltwise; using mindspore::schema::PrimitiveType_NONE; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { namespace { template inline bool AIsInB(const T0 *a, const T1 *b) { @@ -742,4 +742,4 @@ int OpenCLSubGraph::FusionPass() { MS_LOG(DEBUG) << "number of kernels(after fusion) : " << nodes_.size(); return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/opencl_kernel.cc b/mindspore-lite/src/litert/kernel/opencl/opencl_kernel.cc index 578d730e..6006b276 100644 --- a/mindspore-lite/src/litert/kernel/opencl/opencl_kernel.cc +++ b/mindspore-lite/src/litert/kernel/opencl/opencl_kernel.cc @@ -23,7 +23,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::opencl::ImageSize; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int CpuAxis2GpuAxis(size_t ndim, int cpu_axis, int *gpu_axis) { static const std::vector> kCpuAxis2GpuAxisMapTable = { // For 1D tensor, map the cpu axis [0] to gpu axis [kNHWC_C]. 
@@ -456,4 +456,4 @@ int OpenCLKernel::CheckSpecs() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/opencl_kernel.h b/mindspore-lite/src/litert/kernel/opencl/opencl_kernel.h index 86d07453..cd5ab60c 100644 --- a/mindspore-lite/src/litert/kernel/opencl/opencl_kernel.h +++ b/mindspore-lite/src/litert/kernel/opencl/opencl_kernel.h @@ -34,7 +34,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr int INPUT_TENSOR_SIZE_1 = 1; constexpr int INPUT_TENSOR_SIZE_2 = 2; constexpr int INPUT_TENSOR_SIZE_3 = 3; @@ -395,6 +395,6 @@ kernel::LiteKernel *OpenCLKernelCreator(const std::vector &input } return kernel; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_OPENCL_KERNEL_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/opencl_subgraph.cc b/mindspore-lite/src/litert/kernel/opencl/opencl_subgraph.cc index 8a86e4b5..61f09dab 100644 --- a/mindspore-lite/src/litert/kernel/opencl/opencl_subgraph.cc +++ b/mindspore-lite/src/litert/kernel/opencl/opencl_subgraph.cc @@ -27,7 +27,7 @@ #include "include/errorcode.h" #include "src/common/utils.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::opencl::MemType; @@ -565,4 +565,4 @@ int OpenCLSubGraph::Execute(const KernelCallBack &before, const KernelCallBack & } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/opencl_subgraph.h b/mindspore-lite/src/litert/kernel/opencl/opencl_subgraph.h index 4651f056..c0f63e39 100644 --- a/mindspore-lite/src/litert/kernel/opencl/opencl_subgraph.h +++ b/mindspore-lite/src/litert/kernel/opencl/opencl_subgraph.h @@ -25,7 +25,7 @@ #include "src/litert/kernel/gpu/opencl/opencl_executor.h" #include "src/executor/sub_graph_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class OpenCLSubGraph : public SubGraphKernel { public: OpenCLSubGraph(const std::vector &inKernels, @@ -97,6 +97,6 @@ class OpenCLSubGraph : public SubGraphKernel { lite::opencl::OpenCLRuntime *ocl_runtime_{nullptr}; bool all_kernels_infer_done_ = false; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_OPENCL_SUBGRAPH_H_ diff --git a/mindspore-lite/src/litert/kernel/opencl/utils.cc b/mindspore-lite/src/litert/kernel/opencl/utils.cc index cd495bff..e38bd2a6 100644 --- a/mindspore-lite/src/litert/kernel/opencl/utils.cc +++ b/mindspore-lite/src/litert/kernel/opencl/utils.cc @@ -28,7 +28,7 @@ using mindspore::schema::ActivationType_RELU6; using mindspore::schema::ActivationType_SIGMOID; using mindspore::schema::ActivationType_TANH; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { const std::set ArithmeticPrimitives = {schema::PrimitiveType_MulFusion, schema::PrimitiveType_AddFusion, schema::PrimitiveType_SubFusion, @@ -396,4 +396,4 @@ std::vector CreateBuildOptionsExtByDType(TypeId type_id) { } return build_options_ext; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel/opencl/utils.h b/mindspore-lite/src/litert/kernel/opencl/utils.h index d392c7a1..ba883784 100644 --- 
a/mindspore-lite/src/litert/kernel/opencl/utils.h +++ b/mindspore-lite/src/litert/kernel/opencl/utils.h @@ -27,7 +27,7 @@ #include "src/common/utils.h" #include "src/litert/kernel/opencl/opencl_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { struct GpuTensorInfo; // for fusion @@ -103,6 +103,6 @@ std::vector MatrixMultiply(const T A[], const T B[], int M, int N, int K) { return C; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_OPENCL_UTILS_H_ diff --git a/mindspore-lite/src/litert/kernel_exec_util.cc b/mindspore-lite/src/litert/kernel_exec_util.cc index b4a885ee..76e47ffa 100644 --- a/mindspore-lite/src/litert/kernel_exec_util.cc +++ b/mindspore-lite/src/litert/kernel_exec_util.cc @@ -28,7 +28,7 @@ #include "src/control_flow/control_subgraph_creator.h" #include "src/litert/kernel/cpu/base/partial_fusion.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; @@ -697,4 +697,4 @@ std::vector KernelExecUtil::GetCallInputPartialsCorrespondingOutpu KernelExec *KernelExecUtil::GetPartialOutputCall(const KernelExec *partial_node) { return nullptr; } #endif -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/kernel_exec_util.h b/mindspore-lite/src/litert/kernel_exec_util.h index 8c2d43b4..e0cd9640 100644 --- a/mindspore-lite/src/litert/kernel_exec_util.h +++ b/mindspore-lite/src/litert/kernel_exec_util.h @@ -22,7 +22,7 @@ #include "src/executor/sub_graph_kernel.h" #include "src/litert/inner_context.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MS_API KernelExecUtil { public: @@ -64,6 +64,6 @@ class MS_API KernelExecUtil { static std::set AllOutTensor(const std::vector &kernels); }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_EXEC_UTIL_H_ diff --git a/mindspore-lite/src/litert/lite_kernel.cc b/mindspore-lite/src/litert/lite_kernel.cc index b2d28d13..6f7ef2fc 100644 --- a/mindspore-lite/src/litert/lite_kernel.cc +++ b/mindspore-lite/src/litert/lite_kernel.cc @@ -19,7 +19,7 @@ #include "src/common/utils.h" #include "src/litert/infer_manager.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; @@ -127,4 +127,4 @@ int LiteKernel::Execute() { } return lite::RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/src/litert/lite_kernel.h b/mindspore-lite/src/litert/lite_kernel.h index 501ab5d1..1dd881ab 100644 --- a/mindspore-lite/src/litert/lite_kernel.h +++ b/mindspore-lite/src/litert/lite_kernel.h @@ -36,7 +36,7 @@ using mindspore::infer::Abstractkernel; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class MS_API LiteKernel : public Abstractkernel { public: LiteKernel() = default; @@ -205,6 +205,6 @@ class MS_API LiteKernel : public Abstractkernel { int thread_num_ = 1; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_LITE_KERNEL_H_ diff --git a/mindspore-lite/src/litert/model_manager.cc b/mindspore-lite/src/litert/model_manager.cc index cb475778..ea019e40 100644 --- a/mindspore-lite/src/litert/model_manager.cc +++ b/mindspore-lite/src/litert/model_manager.cc @@ -20,7 +20,7 @@ #include #include -namespace mindspore { +namespace 
mindspore::lite { void ModelManager::AddModel(const std::string model_path) { (void)model_path_set_.insert(model_path); } void ModelManager::AddModel(const std::pair model_buff) { @@ -36,4 +36,4 @@ ModelManager::~ModelManager() { model_path_set_.clear(); model_buff_set_.clear(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/model_manager.h b/mindspore-lite/src/litert/model_manager.h index 4760f987..7b93259e 100644 --- a/mindspore-lite/src/litert/model_manager.h +++ b/mindspore-lite/src/litert/model_manager.h @@ -24,7 +24,7 @@ #include #include "src/litert/lite_session.h" -namespace mindspore { +namespace mindspore::lite { class ModelManager { public: ModelManager() {} @@ -48,6 +48,6 @@ class ModelManager { std::set model_path_set_; std::set> model_buff_set_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_MODEL_MANAGER_H_ diff --git a/mindspore-lite/src/litert/runtime_allocator.cc b/mindspore-lite/src/litert/runtime_allocator.cc index 06c85c69..62af6daa 100644 --- a/mindspore-lite/src/litert/runtime_allocator.cc +++ b/mindspore-lite/src/litert/runtime_allocator.cc @@ -16,7 +16,7 @@ #include "src/litert/runtime_allocator.h" -namespace mindspore { +namespace mindspore::lite { RuntimeAllocator::RuntimeAllocator(size_t aligned_size) { aligned_size_ = aligned_size; return; @@ -120,4 +120,4 @@ void RuntimeAllocator::MallocTensorData(lite::Tensor *tensor) { used_list_[offset] = size; offset_map_[tensor] = offset; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/litert/runtime_allocator.h b/mindspore-lite/src/litert/runtime_allocator.h index 67ef7022..59768d61 100644 --- a/mindspore-lite/src/litert/runtime_allocator.h +++ b/mindspore-lite/src/litert/runtime_allocator.h @@ -24,7 +24,7 @@ #include "include/errorcode.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { class RuntimeAllocator : public Allocator { public: explicit RuntimeAllocator(size_t aligned_size = 32); @@ -58,6 +58,6 @@ class RuntimeAllocator : public Allocator { }; using RuntimeAllocatorPtr = std::shared_ptr; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_RUNTIME_ALLOCATOR_H_ diff --git a/mindspore-lite/src/registry/kernel_interface_registry.cc b/mindspore-lite/src/registry/kernel_interface_registry.cc index eebec656..96d0704e 100644 --- a/mindspore-lite/src/registry/kernel_interface_registry.cc +++ b/mindspore-lite/src/registry/kernel_interface_registry.cc @@ -25,7 +25,7 @@ using mindspore::registry::KernelInterfaceCreator; using mindspore::schema::PrimitiveType_MAX; using mindspore::schema::PrimitiveType_MIN; -namespace mindspore { +namespace mindspore::lite { namespace registry { namespace { static constexpr auto kMaxProviderNum = 10; @@ -206,4 +206,4 @@ KernelInterfaceRegistry::~KernelInterfaceRegistry() { } } } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/registry/kernel_interface_registry.h b/mindspore-lite/src/registry/kernel_interface_registry.h index 17bf00dc..eb3c9dc3 100644 --- a/mindspore-lite/src/registry/kernel_interface_registry.h +++ b/mindspore-lite/src/registry/kernel_interface_registry.h @@ -25,7 +25,7 @@ #include "include/registry/register_kernel_interface.h" #include "include/model.h" -namespace mindspore { +namespace mindspore::lite { namespace registry { class KernelInterfaceRegistry { public: @@ -59,6 +59,6 @@ class 
KernelInterfaceRegistry { std::map>> custom_kernels_; }; } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_REGISTRY_KERNEL_INTERFACE_REGISTRY_H_ diff --git a/mindspore-lite/src/registry/register_kernel.cc b/mindspore-lite/src/registry/register_kernel.cc index 50e853b4..12f5d0c5 100644 --- a/mindspore-lite/src/registry/register_kernel.cc +++ b/mindspore-lite/src/registry/register_kernel.cc @@ -20,7 +20,7 @@ #include "src/common/log_adapter.h" #include "src/registry/register_kernel_impl.h" -namespace mindspore { +namespace mindspore::lite { namespace registry { Status RegisterKernel::RegCustomKernel(const std::vector &arch, const std::vector &provider, DataType data_type, const std::vector &type, const CreateKernel creator) { @@ -44,4 +44,4 @@ CreateKernel RegisterKernel::GetCreator(const schema::Primitive *primitive, Kern return ret; } } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/registry/register_kernel_impl.cc b/mindspore-lite/src/registry/register_kernel_impl.cc index 68917ea5..8e228c13 100644 --- a/mindspore-lite/src/registry/register_kernel_impl.cc +++ b/mindspore-lite/src/registry/register_kernel_impl.cc @@ -23,7 +23,7 @@ using mindspore::registry::CreateKernel; using mindspore::registry::KernelDesc; using mindspore::schema::PrimitiveType_MAX; using mindspore::schema::PrimitiveType_MIN; -namespace mindspore::registry { +namespace mindspore::lite::registry { namespace { static const auto kOpTypeLen = PrimitiveType_MAX - PrimitiveType_MIN + 1; static const auto kDataTypeLen = @@ -212,4 +212,4 @@ RegistryKernelImpl::~RegistryKernelImpl() { } } } -} // namespace mindspore::registry +} // namespace mindspore::lite::registry diff --git a/mindspore-lite/src/registry/register_kernel_impl.h b/mindspore-lite/src/registry/register_kernel_impl.h index 4041b85a..8fcacd56 100644 --- a/mindspore-lite/src/registry/register_kernel_impl.h +++ b/mindspore-lite/src/registry/register_kernel_impl.h @@ -25,7 +25,7 @@ #include #include "include/registry/register_kernel.h" -namespace mindspore::registry { +namespace mindspore::lite::registry { class RegistryKernelImpl { public: RegistryKernelImpl() = default; @@ -66,6 +66,6 @@ class RegistryKernelImpl { registry::CreateKernel GetCustomKernelCreator(const schema::Primitive *primitive, registry::KernelDesc *desc); }; -} // namespace mindspore::registry +} // namespace mindspore::lite::registry #endif // MINDSPORE_LITE_SRC_REGISTRY_REGISTER_KERNEL_IMPL_H_ diff --git a/mindspore-lite/src/registry/register_kernel_interface.cc b/mindspore-lite/src/registry/register_kernel_interface.cc index 274ba3d9..749531cd 100644 --- a/mindspore-lite/src/registry/register_kernel_interface.cc +++ b/mindspore-lite/src/registry/register_kernel_interface.cc @@ -20,7 +20,7 @@ #include "src/common/log_adapter.h" #include "src/registry/kernel_interface_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace registry { Status RegisterKernelInterface::Reg(const std::vector &provider, int op_type, const KernelInterfaceCreator creator) { @@ -38,4 +38,4 @@ std::shared_ptr RegisterKernelInterface::GetKernelInter return KernelInterfaceRegistry::Instance()->GetKernelInterface(CharToString(provider), primitive, kernel); } } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/train/loss_kernel.h b/mindspore-lite/src/train/loss_kernel.h index f13b2fc8..0de6575f 100644 --- 
a/mindspore-lite/src/train/loss_kernel.h +++ b/mindspore-lite/src/train/loss_kernel.h @@ -17,7 +17,7 @@ #define MINDSPORE_LITE_SRC_TRAIN_LOSS_KERNEL_H_ #include #include "src/executor/kernel_exec.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class LossKernel : public LiteKernel { public: @@ -28,5 +28,5 @@ class LossKernel : public LiteKernel { ~LossKernel() override = default; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_TRAIN_LOSS_KERNEL_H_ diff --git a/mindspore-lite/src/train/opt_allocator.cc b/mindspore-lite/src/train/opt_allocator.cc index 485e4b97..dc832a3f 100644 --- a/mindspore-lite/src/train/opt_allocator.cc +++ b/mindspore-lite/src/train/opt_allocator.cc @@ -17,7 +17,7 @@ #include #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { size_t OptAllocator::FindFree(size_t size) { size_t min_size = std::numeric_limits::max(); size_t min_addr = std::numeric_limits::max(); @@ -86,4 +86,4 @@ void OptAllocator::Free(size_t addr) { alloc_.erase(addr); Reorder(addr); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/train/opt_allocator.h b/mindspore-lite/src/train/opt_allocator.h index 142165c3..e655e143 100644 --- a/mindspore-lite/src/train/opt_allocator.h +++ b/mindspore-lite/src/train/opt_allocator.h @@ -20,7 +20,7 @@ #include #include "include/api/allocator.h" -namespace mindspore { +namespace mindspore::lite { class OptAllocator { public: explicit OptAllocator(size_t aligned_size = 32) : align_size_(aligned_size) {} @@ -37,5 +37,5 @@ class OptAllocator { size_t heap_ = 0; size_t align_size_; }; -}; // namespace mindspore +}; // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_TRAIN_OPT_ALLOCATOR_H_ diff --git a/mindspore-lite/src/train/optimizer/common/fusion_utils.cc b/mindspore-lite/src/train/optimizer/common/fusion_utils.cc index 4da01b8c..b6d676b3 100644 --- a/mindspore-lite/src/train/optimizer/common/fusion_utils.cc +++ b/mindspore-lite/src/train/optimizer/common/fusion_utils.cc @@ -20,7 +20,7 @@ #include "src/common/log_util.h" #include "src/train/optimizer/common/fusion_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { STATUS GetMatchNodeIndex(schema::MetaGraphT *graph, const std::unordered_map> &matched_path, @@ -47,4 +47,4 @@ bool IsMultiOutputNode(schema::MetaGraphT *graph, size_t out_node_index) { return false; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/train/optimizer/common/fusion_utils.h b/mindspore-lite/src/train/optimizer/common/fusion_utils.h index 604472b5..d2f74f1c 100644 --- a/mindspore-lite/src/train/optimizer/common/fusion_utils.h +++ b/mindspore-lite/src/train/optimizer/common/fusion_utils.h @@ -29,7 +29,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::lite::STATUS; -namespace mindspore { +namespace mindspore::lite { namespace opt { inline constexpr int kInputIndexZero = 0; inline constexpr int kInputIndexOne = 1; @@ -48,5 +48,5 @@ STATUS GetMatchNodeIndex(schema::MetaGraphT *graph, bool IsMultiOutputNode(schema::MetaGraphT *graph, size_t out_node_index); } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_TRAIN_OPTIMIZER_COMMON_FUSION_UTILS_H_ diff --git a/mindspore-lite/src/train/optimizer_kernel.h b/mindspore-lite/src/train/optimizer_kernel.h index a4ec60ef..1890fa98 100644 --- 
a/mindspore-lite/src/train/optimizer_kernel.h +++ b/mindspore-lite/src/train/optimizer_kernel.h @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::RET_OUT_OF_TENSOR_RANGE; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { constexpr static int kWeightIdx = 0; constexpr static int kMomentVector1stIdx = 1; constexpr static int kMomentVector2stIdx = 2; @@ -246,5 +246,5 @@ class OptimizerKernel : public LiteKernel { WeightUpdateMode weight_update_mod_ = WeightUpdateMode::NORMAL; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_TRAIN_OPTIMIZER_KERNEL_H_ diff --git a/mindspore-lite/src/train/static_allocator.h b/mindspore-lite/src/train/static_allocator.h index 833fd832..4d8092d1 100644 --- a/mindspore-lite/src/train/static_allocator.h +++ b/mindspore-lite/src/train/static_allocator.h @@ -16,7 +16,7 @@ #ifndef MINDSPORE_LITE_SRC_TRAIN_STATIC_ALLOCATOR_H_ #define MINDSPORE_LITE_SRC_TRAIN_STATIC_ALLOCATOR_H_ -namespace mindspore { +namespace mindspore::lite { class StaticAllocator : public Allocator { public: void SetContex(void *buf, size_t size) { @@ -48,5 +48,5 @@ class StaticAllocator : public Allocator { size_t size_; size_t total_size_ = 0; }; -}; // namespace mindspore +}; // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_TRAIN_STATIC_ALLOCATOR_H_ diff --git a/mindspore-lite/src/train/train_populate_parameter.cc b/mindspore-lite/src/train/train_populate_parameter.cc index 3cfdd24d..14c6adec 100644 --- a/mindspore-lite/src/train/train_populate_parameter.cc +++ b/mindspore-lite/src/train/train_populate_parameter.cc @@ -35,7 +35,7 @@ using mindspore::lite::Registry; -namespace mindspore { +namespace mindspore::lite { namespace kernel { namespace { constexpr int kInputIndexOne = 1; @@ -693,4 +693,4 @@ void PopulateTrainParameters() { lite::SCHEMA_CUR); } } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/src/train/train_populate_parameter.h b/mindspore-lite/src/train/train_populate_parameter.h index 12fa0dbd..7a86bddc 100644 --- a/mindspore-lite/src/train/train_populate_parameter.h +++ b/mindspore-lite/src/train/train_populate_parameter.h @@ -17,9 +17,9 @@ #ifndef MINDSPORE_LITE_SRC_TRAIN_TRAIN_POPULATE_PARAMETER_H_ #define MINDSPORE_LITE_SRC_TRAIN_TRAIN_POPULATE_PARAMETER_H_ -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void PopulateTrainParameters(); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_TRAIN_TRAIN_POPULATE_PARAMETER_H_ diff --git a/mindspore-lite/src/train/train_populate_parameter_v0.h b/mindspore-lite/src/train/train_populate_parameter_v0.h index 6a070834..62158864 100644 --- a/mindspore-lite/src/train/train_populate_parameter_v0.h +++ b/mindspore-lite/src/train/train_populate_parameter_v0.h @@ -17,9 +17,9 @@ #ifndef MINDSPORE_LITE_SRC_TRAIN_TRAIN_POPULATE_PARAMETER_V0_H_ #define MINDSPORE_LITE_SRC_TRAIN_TRAIN_POPULATE_PARAMETER_V0_H_ -namespace mindspore::kernel { +namespace mindspore::lite::kernel { void PopulateTrainV0Parameters(); -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_SRC_TRAIN_TRAIN_POPULATE_PARAMETER_V0_H_ diff --git a/mindspore-lite/test/common/common_test.cc b/mindspore-lite/test/common/common_test.cc index 46535383..674516a9 100644 --- a/mindspore-lite/test/common/common_test.cc +++ b/mindspore-lite/test/common/common_test.cc @@ -24,7 +24,7 @@ 
extern "C" { #endif #endif -namespace mindspore { +namespace mindspore::lite { void CommonTest::SetUpTestCase() {} void CommonTest::TearDownTestCase() {} void CommonTest::SetUp() {} @@ -101,7 +101,7 @@ void *CommonTest::TensorMutabData(lite::Tensor *tensor) { return tensor->Mutable size_t CommonTest::TensorSize(lite::Tensor *tensor) { return tensor->Size(); } int CommonTest::TensorMallocData(lite::Tensor *tensor) { return tensor->MallocData(); } -} // namespace mindspore +} // namespace mindspore::lite #ifdef __cplusplus #if __cplusplus diff --git a/mindspore-lite/test/main.cc b/mindspore-lite/test/main.cc index de5be496..d3540dd1 100644 --- a/mindspore-lite/test/main.cc +++ b/mindspore-lite/test/main.cc @@ -17,7 +17,7 @@ #include "gtest/gtest.h" #ifdef USE_GLOG -namespace mindspore { +namespace mindspore::lite { extern void InitSubModulesLogLevel(); } #endif diff --git a/mindspore-lite/test/st/mindrt_parallel_test.cc b/mindspore-lite/test/st/mindrt_parallel_test.cc index 7e2ce265..87cd8e4e 100644 --- a/mindspore-lite/test/st/mindrt_parallel_test.cc +++ b/mindspore-lite/test/st/mindrt_parallel_test.cc @@ -26,7 +26,7 @@ #include "src/common/file_utils.h" #include "include/converter.h" -namespace mindspore { +namespace mindspore::lite { class MindrtParallelTest : public mindspore::CommonTest { public: MindrtParallelTest() {} @@ -172,4 +172,4 @@ TEST_F(MindrtParallelTest, runtime1) { delete session; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/st/mix_data_type_test.cc b/mindspore-lite/test/st/mix_data_type_test.cc index cd02e804..0b83d85a 100644 --- a/mindspore-lite/test/st/mix_data_type_test.cc +++ b/mindspore-lite/test/st/mix_data_type_test.cc @@ -24,7 +24,7 @@ #include "include/api/model.h" #include "src/litert/cxx_api/model/model_impl.h" -namespace mindspore { +namespace mindspore::lite { class MixDataTypeTest : public mindspore::CommonTest { public: MixDataTypeTest() {} @@ -235,4 +235,4 @@ TEST_F(MixDataTypeTest, mix1) { ASSERT_LE(fabs(fp32_data[2] - (0.934805)), 0.01); ASSERT_LE(fabs(fp32_data[3] - (0.879054)), 0.01); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/st/sub_graph_test.cc b/mindspore-lite/test/st/sub_graph_test.cc index d5f902d5..75dca651 100644 --- a/mindspore-lite/test/st/sub_graph_test.cc +++ b/mindspore-lite/test/st/sub_graph_test.cc @@ -24,7 +24,7 @@ #include "src/litert/lite_session.h" #include "tools/common/meta_graph_serializer.h" -namespace mindspore { +namespace mindspore::lite { class SubGraphTest : public mindspore::CommonTest { public: SubGraphTest() = default; @@ -309,12 +309,12 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) { meta_graph->name = "graph"; meta_graph->version = Version(); // ----------------------------------------------------------------------- - lite::MetaGraphSerializer::Save( - *meta_graph, "/mnt/data/workspace/OpenAI/Huawei/mindspore-lite/my_test/models/recursive_subgraph"); + lite::MetaGraphSerializer::Save(*meta_graph, + "/mnt/data/workspace/OpenAI/Huawei/mindspore-lite/my_test/models/recursive_subgraph"); // ----------------------------------------------------------------------- size_t size = 0; - char *graph_buf = lite::ReadFile( - "/mnt/data/workspace/OpenAI/Huawei/mindspore-lite/my_test/models/recursive_subgraph.ms", &size); + char *graph_buf = + lite::ReadFile("/mnt/data/workspace/OpenAI/Huawei/mindspore-lite/my_test/models/recursive_subgraph.ms", &size); ASSERT_NE(graph_buf, nullptr); auto model = 
std::shared_ptr(lite::Model::Import(graph_buf, size)); @@ -335,4 +335,4 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) { ret = session->RunGraph(); ASSERT_EQ(ret, lite::RET_OK); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/adam_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/adam_infer_test.cc index 355bd16e..d9337afb 100644 --- a/mindspore-lite/test/ut/nnacl/infer/adam_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/adam_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/adam_infer.h" -namespace mindspore { +namespace mindspore::lite { class AdamInferTest : public mindspore::CommonTest { public: @@ -48,4 +48,4 @@ TEST_F(AdamInferTest, AdamInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/adam_weight_decay_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/adam_weight_decay_infer_test.cc index 62f8654c..d4426dd8 100644 --- a/mindspore-lite/test/ut/nnacl/infer/adam_weight_decay_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/adam_weight_decay_infer_test.cc @@ -17,7 +17,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/adam_weight_decay_infer.h" -namespace mindspore { +namespace mindspore::lite { class AdamWeightDecayInfer : public mindspore::CommonTest { public: AdamWeightDecayInfer() {} @@ -54,4 +54,4 @@ TEST_F(AdamWeightDecayInfer, OneDim) { ASSERT_EQ(outputs[0]->shape_[0], 1); AdamWeightDecayInferReleaseResources(param, inputs, outputs); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/addn_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/addn_infer_test.cc index 6d5cd2c8..dd95d659 100644 --- a/mindspore-lite/test/ut/nnacl/infer/addn_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/addn_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/addn_infer.h" -namespace mindspore { +namespace mindspore::lite { class AddnInferTest : public mindspore::CommonTest { public: @@ -88,4 +88,4 @@ TEST_F(AddnInferTest, AddnInferTest1) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/apply_momentum_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/apply_momentum_infer_test.cc index ee3b919c..fb098c96 100644 --- a/mindspore-lite/test/ut/nnacl/infer/apply_momentum_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/apply_momentum_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/apply_momentum_infer.h" -namespace mindspore { +namespace mindspore::lite { class ApplyMomentumInferTest : public mindspore::CommonTest { public: @@ -48,4 +48,4 @@ TEST_F(ApplyMomentumInferTest, ApplyMomentumInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/argmax_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/argmax_infer_test.cc index 10ecad2c..9c3939e3 100644 --- a/mindspore-lite/test/ut/nnacl/infer/argmax_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/argmax_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/argmin_max_infer.h" -namespace mindspore { +namespace mindspore::lite { class ArgmaxInferTest : public mindspore::CommonTest { public: @@ -133,4 +133,4 @@ TEST_F(ArgmaxInferTest, ArgmaxInferTestTopK2) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/test/ut/nnacl/infer/argmin_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/argmin_infer_test.cc index 1c6ed96a..731782a4 100644 --- a/mindspore-lite/test/ut/nnacl/infer/argmin_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/argmin_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/argmin_max_infer.h" -namespace mindspore { +namespace mindspore::lite { class ArgminInferTest : public mindspore::CommonTest { public: @@ -133,4 +133,4 @@ TEST_F(ArgminInferTest, ArgminInferTestTopK2) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/arithmetic_compare_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/arithmetic_compare_infer_test.cc index c1096310..3b612d29 100644 --- a/mindspore-lite/test/ut/nnacl/infer/arithmetic_compare_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/arithmetic_compare_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/arithmetic_compare_infer.h" -namespace mindspore { +namespace mindspore::lite { class ArithmeticCompareInferTest : public mindspore::CommonTest { public: @@ -166,4 +166,4 @@ TEST_F(ArithmeticCompareInferTest, ArithmeticCompareInferTest3) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/arithmetic_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/arithmetic_infer_test.cc index fa759509..d4775805 100644 --- a/mindspore-lite/test/ut/nnacl/infer/arithmetic_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/arithmetic_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/arithmetic_infer.h" -namespace mindspore { +namespace mindspore::lite { class ArithmeticInferTest : public mindspore::CommonTest { public: @@ -166,4 +166,4 @@ TEST_F(ArithmeticInferTest, ArithmeticInferTest3) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/assign_add_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/assign_add_infer_test.cc index b2055260..4acac28b 100644 --- a/mindspore-lite/test/ut/nnacl/infer/assign_add_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/assign_add_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/assign_add_infer.h" -namespace mindspore { +namespace mindspore::lite { class AssignAddInferTest : public mindspore::CommonTest { public: @@ -51,4 +51,4 @@ TEST_F(AssignAddInferTest, AssignAddInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/assign_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/assign_infer_test.cc index 739c25be..45e4aeeb 100644 --- a/mindspore-lite/test/ut/nnacl/infer/assign_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/assign_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/assign_infer.h" -namespace mindspore { +namespace mindspore::lite { class AssignInferTest : public mindspore::CommonTest { public: @@ -53,4 +53,4 @@ TEST_F(AssignInferTest, AssignInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/audio_spectrogram_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/audio_spectrogram_infer_test.cc index 14497e73..c1b7b553 100644 --- a/mindspore-lite/test/ut/nnacl/infer/audio_spectrogram_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/audio_spectrogram_infer_test.cc @@ -16,7 
+16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/audio_spectrogram_infer.h" -namespace mindspore { +namespace mindspore::lite { class AudioSpectrogramInferTest : public mindspore::CommonTest { public: @@ -56,4 +56,4 @@ TEST_F(AudioSpectrogramInferTest, AudioSpectrogramInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/batch_to_space_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/batch_to_space_infer_test.cc index 2985f2b7..16f190d2 100644 --- a/mindspore-lite/test/ut/nnacl/infer/batch_to_space_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/batch_to_space_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/batch_to_space_infer.h" -namespace mindspore { +namespace mindspore::lite { class BatchToSpaceInferTest : public mindspore::CommonTest { public: @@ -180,4 +180,4 @@ TEST_F(BatchToSpaceInferTest, BatchToSpaceInferTest3) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/bias_grad_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/bias_grad_infer_test.cc index 855e0a4e..c7bc2741 100644 --- a/mindspore-lite/test/ut/nnacl/infer/bias_grad_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/bias_grad_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/bias_grad_infer.h" -namespace mindspore { +namespace mindspore::lite { class BiasGradInferTest : public mindspore::CommonTest { public: @@ -56,4 +56,4 @@ TEST_F(BiasGradInferTest, BiasGradInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/binary_cross_entropy_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/binary_cross_entropy_infer_test.cc index 25c51b06..3edf7fe4 100644 --- a/mindspore-lite/test/ut/nnacl/infer/binary_cross_entropy_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/binary_cross_entropy_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/binary_cross_entropy_infer.h" -namespace mindspore { +namespace mindspore::lite { class BinaryCrossEntropyInferTest : public mindspore::CommonTest { public: @@ -82,4 +82,4 @@ TEST_F(BinaryCrossEntropyInferTest, BinaryCrossEntropyInferTest1) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/bn_grad_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/bn_grad_infer_test.cc index 9ea08ec6..b55ec85a 100644 --- a/mindspore-lite/test/ut/nnacl/infer/bn_grad_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/bn_grad_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/bn_grad_infer.h" -namespace mindspore { +namespace mindspore::lite { class BnGradInferTest : public mindspore::CommonTest { public: @@ -79,4 +79,4 @@ TEST_F(BnGradInferTest, BnGradInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/broadcast_to_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/broadcast_to_infer_test.cc index 34f44a04..b470128e 100644 --- a/mindspore-lite/test/ut/nnacl/infer/broadcast_to_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/broadcast_to_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/broadcast_to_infer.h" -namespace mindspore { +namespace mindspore::lite { class BroadcastToInferTest : public mindspore::CommonTest { public: @@ -145,4 +145,4 @@ 
TEST_F(BroadcastToInferTest, BroadcastToInferTest3) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/cast_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/cast_infer_test.cc index 18b54fce..cf28805f 100644 --- a/mindspore-lite/test/ut/nnacl/infer/cast_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/cast_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/cast_infer.h" -namespace mindspore { +namespace mindspore::lite { class CastInferTest : public mindspore::CommonTest { public: @@ -269,4 +269,4 @@ TEST_F(CastInferTest, CastInferTest5) { delete outputs[i]; } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/concat_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/concat_infer_test.cc index a3b15dd2..ed74273f 100644 --- a/mindspore-lite/test/ut/nnacl/infer/concat_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/concat_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/concat_infer.h" -namespace mindspore { +namespace mindspore::lite { class ConcatInferTest : public mindspore::CommonTest { public: @@ -236,4 +236,4 @@ TEST_F(ConcatInferTest, ConcatInferTest5) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/constant_of_shape_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/constant_of_shape_infer_test.cc index 7f4cbd53..2442e110 100644 --- a/mindspore-lite/test/ut/nnacl/infer/constant_of_shape_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/constant_of_shape_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/constant_of_shape_infer.h" -namespace mindspore { +namespace mindspore::lite { class ConstantOfShapeInferTest : public mindspore::CommonTest { public: @@ -59,4 +59,4 @@ TEST_F(ConstantOfShapeInferTest, ConstantOfShapeInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/conv2d_grad_filter_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/conv2d_grad_filter_infer_test.cc index 1d22281e..6f38f259 100644 --- a/mindspore-lite/test/ut/nnacl/infer/conv2d_grad_filter_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/conv2d_grad_filter_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/conv2d_grad_filter_infer.h" -namespace mindspore { +namespace mindspore::lite { class Conv2dGradFilterInferTest : public mindspore::CommonTest { public: @@ -58,4 +58,4 @@ TEST_F(Conv2dGradFilterInferTest, Conv2dGradFilterInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/conv2d_grad_input_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/conv2d_grad_input_infer_test.cc index cce2634d..c0d15ec8 100644 --- a/mindspore-lite/test/ut/nnacl/infer/conv2d_grad_input_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/conv2d_grad_input_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/conv2d_grad_input_infer.h" -namespace mindspore { +namespace mindspore::lite { class Conv2dGradInputInferTest : public mindspore::CommonTest { public: @@ -58,4 +58,4 @@ TEST_F(Conv2dGradInputInferTest, Conv2dGradInputInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/conv2d_infer_test.cc 
b/mindspore-lite/test/ut/nnacl/infer/conv2d_infer_test.cc index 3f88a643..e97b8d36 100644 --- a/mindspore-lite/test/ut/nnacl/infer/conv2d_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/conv2d_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/conv2d_infer.h" -namespace mindspore { +namespace mindspore::lite { class Conv2dInferTest : public mindspore::CommonTest { public: @@ -526,4 +526,4 @@ TEST_F(Conv2dInferTest, Conv2dInferTest10) { delete outputs[i]; } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/crop_and_resize_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/crop_and_resize_infer_test.cc index 5bee3402..9b9b893d 100644 --- a/mindspore-lite/test/ut/nnacl/infer/crop_and_resize_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/crop_and_resize_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/crop_and_resize_infer.h" -namespace mindspore { +namespace mindspore::lite { class CropAndResizeInferTest : public mindspore::CommonTest { public: @@ -121,4 +121,4 @@ TEST_F(CropAndResizeInferTest, CropAndResizeInferTest1) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/crop_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/crop_infer_test.cc index 434d97a1..29ae851b 100644 --- a/mindspore-lite/test/ut/nnacl/infer/crop_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/crop_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/crop_infer.h" -namespace mindspore { +namespace mindspore::lite { class CropInferTest : public mindspore::CommonTest { public: @@ -335,4 +335,4 @@ TEST_F(CropInferTest, CropInferTest5) { } } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/cumsum_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/cumsum_infer_test.cc index 5a09a796..04aaf3fc 100644 --- a/mindspore-lite/test/ut/nnacl/infer/cumsum_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/cumsum_infer_test.cc @@ -17,7 +17,7 @@ #include "nnacl_c/infer/cumsum_infer.h" #include "nnacl_c/cumsum_parameter.h" -namespace mindspore { +namespace mindspore::lite { class CumSumInferTest : public mindspore::CommonTest { public: @@ -59,4 +59,4 @@ TEST_F(CumSumInferTest, Test0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/custom_extract_features_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/custom_extract_features_infer_test.cc index c38148aa..ef414417 100644 --- a/mindspore-lite/test/ut/nnacl/infer/custom_extract_features_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/custom_extract_features_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/string/custom_extract_features_infer.h" -namespace mindspore { +namespace mindspore::lite { class CustomExtractFeaturesInferTest : public mindspore::CommonTest { public: @@ -91,4 +91,4 @@ TEST_F(CustomExtractFeaturesInferTest, CustomExtractFeaturesInferTest1) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/custom_normalize_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/custom_normalize_infer_test.cc index 081aa543..f13585e9 100644 --- a/mindspore-lite/test/ut/nnacl/infer/custom_normalize_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/custom_normalize_infer_test.cc @@ -16,7 +16,7 @@ #include 
"common/common_test.h" #include "nnacl_c/infer/string/custom_normalize_infer.h" -namespace mindspore { +namespace mindspore::lite { class CustomNormalizeInferTest : public mindspore::CommonTest { public: @@ -81,4 +81,4 @@ TEST_F(CustomNormalizeInferTest, CustomNormalizeInferTest1) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/custom_predict_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/custom_predict_infer_test.cc index 415c5c88..76ca2b72 100644 --- a/mindspore-lite/test/ut/nnacl/infer/custom_predict_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/custom_predict_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/string/custom_predict_infer.h" -namespace mindspore { +namespace mindspore::lite { class CustomPredictInferTest : public mindspore::CommonTest { public: @@ -54,4 +54,4 @@ TEST_F(CustomPredictInferTest, CustomPredictInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/deconv2d_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/deconv2d_infer_test.cc index 3e1f9579..95d246e2 100644 --- a/mindspore-lite/test/ut/nnacl/infer/deconv2d_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/deconv2d_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/deconv2d_infer.h" -namespace mindspore { +namespace mindspore::lite { class Deconv2dInferTest : public mindspore::CommonTest { public: @@ -166,4 +166,4 @@ TEST_F(Deconv2dInferTest, Deconv2dInferTest2) { delete outputs[i]; } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/depth_to_space_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/depth_to_space_infer_test.cc index 2508e212..81f87a56 100644 --- a/mindspore-lite/test/ut/nnacl/infer/depth_to_space_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/depth_to_space_infer_test.cc @@ -17,7 +17,7 @@ #include "nnacl_c/infer/depth_to_space_infer.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { class DepthToSpaceInferTest : public mindspore::CommonTest { public: @@ -172,4 +172,4 @@ TEST_F(DepthToSpaceInferTest, DepthToSpaceInferTest4) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/depthwise_conv2d_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/depthwise_conv2d_infer_test.cc index cf96e455..837cc42c 100644 --- a/mindspore-lite/test/ut/nnacl/infer/depthwise_conv2d_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/depthwise_conv2d_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/depthwise_conv2d_infer.h" -namespace mindspore { +namespace mindspore::lite { class DepthwiseConv2dInferTest : public mindspore::CommonTest { public: @@ -537,4 +537,4 @@ TEST_F(DepthwiseConv2dInferTest, DepthwiseConv2dInferTest10) { delete outputs[i]; } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/detection_post_process_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/detection_post_process_infer_test.cc index 0321ff0c..a8caf400 100644 --- a/mindspore-lite/test/ut/nnacl/infer/detection_post_process_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/detection_post_process_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/detection_post_process_infer.h" -namespace mindspore { +namespace mindspore::lite { 
class DetectionPostProcessInferTest : public mindspore::CommonTest { public: @@ -79,4 +79,4 @@ TEST_F(DetectionPostProcessInferTest, DetectionPostProcessInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/dropout_grad_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/dropout_grad_infer_test.cc index 9731d71e..cee5e979 100644 --- a/mindspore-lite/test/ut/nnacl/infer/dropout_grad_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/dropout_grad_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/dropout_grad_infer.h" -namespace mindspore { +namespace mindspore::lite { class DropoutGradInferTest : public mindspore::CommonTest { public: @@ -53,4 +53,4 @@ TEST_F(DropoutGradInferTest, DropoutGradInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/embedding_lookup_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/embedding_lookup_infer_test.cc index 995b129f..533feca3 100644 --- a/mindspore-lite/test/ut/nnacl/infer/embedding_lookup_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/embedding_lookup_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/embedding_lookup_infer.h" -namespace mindspore { +namespace mindspore::lite { class EmbeddingLookupInferTest : public mindspore::CommonTest { public: @@ -64,4 +64,4 @@ TEST_F(EmbeddingLookupInferTest, EmbeddingLookupInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/expand_dims_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/expand_dims_infer_test.cc index 039ae90a..d2aeef78 100644 --- a/mindspore-lite/test/ut/nnacl/infer/expand_dims_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/expand_dims_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/expand_dims_infer.h" -namespace mindspore { +namespace mindspore::lite { class ExpandDimsInferTest : public mindspore::CommonTest { public: @@ -120,4 +120,4 @@ TEST_F(ExpandDimsInferTest, ExpandDimsInferTest2) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/fft_imag_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/fft_imag_infer_test.cc index 36390924..fa602da1 100644 --- a/mindspore-lite/test/ut/nnacl/infer/fft_imag_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/fft_imag_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/fft_imag_infer.h" -namespace mindspore { +namespace mindspore::lite { class FftImagInferTest : public mindspore::CommonTest { public: @@ -55,4 +55,4 @@ TEST_F(FftImagInferTest, FftImagInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/fill_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/fill_infer_test.cc index 451462af..443ef8d4 100644 --- a/mindspore-lite/test/ut/nnacl/infer/fill_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/fill_infer_test.cc @@ -17,7 +17,7 @@ #include "nnacl_c/infer/fill_infer.h" #include "nnacl_c/fill_parameter.h" -namespace mindspore { +namespace mindspore::lite { class FillInferTest : public mindspore::CommonTest { public: @@ -119,4 +119,4 @@ TEST_F(FillInferTest, FillInferTest3) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/flatten_grad_infer_test.cc 
b/mindspore-lite/test/ut/nnacl/infer/flatten_grad_infer_test.cc index acad0586..1a51ed41 100644 --- a/mindspore-lite/test/ut/nnacl/infer/flatten_grad_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/flatten_grad_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/flatten_grad_infer.h" -namespace mindspore { +namespace mindspore::lite { class FlattenGradInferTest : public mindspore::CommonTest { public: @@ -55,4 +55,4 @@ TEST_F(FlattenGradInferTest, FlattenGradInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/flatten_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/flatten_infer_test.cc index 24029984..64910307 100644 --- a/mindspore-lite/test/ut/nnacl/infer/flatten_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/flatten_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/flatten_infer.h" -namespace mindspore { +namespace mindspore::lite { class FlattenInferTest : public mindspore::CommonTest { public: @@ -121,4 +121,4 @@ TEST_F(FlattenInferTest, FlattenInferTest3) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/full_connection_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/full_connection_infer_test.cc index 33f14746..3bd28f70 100644 --- a/mindspore-lite/test/ut/nnacl/infer/full_connection_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/full_connection_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/full_connection_infer.h" -namespace mindspore { +namespace mindspore::lite { class FullConnectionInferTest : public mindspore::CommonTest { public: @@ -119,4 +119,4 @@ TEST_F(FullConnectionInferTest, FullConnectionInferTest2) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/fused_batchnorm_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/fused_batchnorm_infer_test.cc index b5bdd027..e70dccae 100644 --- a/mindspore-lite/test/ut/nnacl/infer/fused_batchnorm_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/fused_batchnorm_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/fused_batchnorm_infer.h" -namespace mindspore { +namespace mindspore::lite { class FusedBatchNormInferTest : public mindspore::CommonTest { public: @@ -60,4 +60,4 @@ TEST_F(FusedBatchNormInferTest, FusedBatchNormInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/gather_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/gather_infer_test.cc index 5d35f7a8..3a3fb8f2 100644 --- a/mindspore-lite/test/ut/nnacl/infer/gather_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/gather_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/gather_infer.h" -namespace mindspore { +namespace mindspore::lite { class GatherInferTest : public mindspore::CommonTest { public: @@ -186,4 +186,4 @@ TEST_F(GatherInferTest, GatherInferTest4) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/gather_nd_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/gather_nd_infer_test.cc index 5c203271..ccf070f0 100644 --- a/mindspore-lite/test/ut/nnacl/infer/gather_nd_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/gather_nd_infer_test.cc @@ -17,7 +17,7 @@ #include "nnacl_c/infer/gather_nd_infer.h" #include 
"nnacl_c/gather_nd_parameter.h" -namespace mindspore { +namespace mindspore::lite { class GatherNdInferTest : public mindspore::CommonTest { public: @@ -180,4 +180,4 @@ TEST_F(GatherNdInferTest, GatherNdInferTest4) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/group_conv2d_grad_input_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/group_conv2d_grad_input_infer_test.cc index d2b9b3a7..9bba9c09 100644 --- a/mindspore-lite/test/ut/nnacl/infer/group_conv2d_grad_input_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/group_conv2d_grad_input_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/group_conv2d_grad_input_infer.h" -namespace mindspore { +namespace mindspore::lite { class GroupConv2dGradInputInferTest : public mindspore::CommonTest { public: @@ -53,4 +53,4 @@ TEST_F(GroupConv2dGradInputInferTest, GroupConv2dGradInputInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/gru_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/gru_infer_test.cc index dde9f8a7..bf395ce3 100644 --- a/mindspore-lite/test/ut/nnacl/infer/gru_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/gru_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/gru_infer.h" -namespace mindspore { +namespace mindspore::lite { class GruInferTest : public mindspore::CommonTest { public: @@ -129,4 +129,4 @@ TEST_F(GruInferTest, GruInferTest1) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/hashtable_lookup_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/hashtable_lookup_infer_test.cc index 37eb8114..f851eb40 100644 --- a/mindspore-lite/test/ut/nnacl/infer/hashtable_lookup_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/hashtable_lookup_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/string/hashtable_lookup_infer.h" -namespace mindspore { +namespace mindspore::lite { class HashtableLookupInferTest : public mindspore::CommonTest { public: @@ -58,4 +58,4 @@ TEST_F(HashtableLookupInferTest, HashtableLookupInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/invert_permutation_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/invert_permutation_infer_test.cc index 07467648..9b0e8d5d 100644 --- a/mindspore-lite/test/ut/nnacl/infer/invert_permutation_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/invert_permutation_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/invert_permutation_infer.h" -namespace mindspore { +namespace mindspore::lite { class InvertPermutationInferTest : public mindspore::CommonTest { public: @@ -50,4 +50,4 @@ TEST_F(InvertPermutationInferTest, InvertPermutationInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/layer_norm_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/layer_norm_infer_test.cc index 68498187..8743fedd 100644 --- a/mindspore-lite/test/ut/nnacl/infer/layer_norm_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/layer_norm_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/layer_norm_infer.h" -namespace mindspore { +namespace mindspore::lite { class LayerNormInferTest : public mindspore::CommonTest { public: @@ -95,4 +95,4 @@ 
TEST_F(LayerNormInferTest, LayerNormInferTest2) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/lsh_projection_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/lsh_projection_infer_test.cc index 72a19e04..991ee3c4 100644 --- a/mindspore-lite/test/ut/nnacl/infer/lsh_projection_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/lsh_projection_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/string/lsh_projection_infer.h" -namespace mindspore { +namespace mindspore::lite { class LshProjectionInferTest : public mindspore::CommonTest { public: @@ -120,4 +120,4 @@ TEST_F(LshProjectionInferTest, LshProjectionInferTest2) { } } // note: may be error -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/lstm_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/lstm_infer_test.cc index 5c3434eb..35bca327 100644 --- a/mindspore-lite/test/ut/nnacl/infer/lstm_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/lstm_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/lstm_infer.h" -namespace mindspore { +namespace mindspore::lite { class LstmInferTest : public mindspore::CommonTest { public: @@ -75,4 +75,4 @@ TEST_F(LstmInferTest, LstmInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/matmul_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/matmul_infer_test.cc index e5e966dc..503fd8d1 100644 --- a/mindspore-lite/test/ut/nnacl/infer/matmul_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/matmul_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/matmul_infer.h" -namespace mindspore { +namespace mindspore::lite { class MatmulInferTest : public mindspore::CommonTest { public: @@ -154,4 +154,4 @@ TEST_F(MatmulInferTest, MatmulInferTest3) { delete outputs[i]; } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/max_min_grad_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/max_min_grad_infer_test.cc index 740a7278..0af09328 100644 --- a/mindspore-lite/test/ut/nnacl/infer/max_min_grad_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/max_min_grad_infer_test.cc @@ -17,7 +17,7 @@ #include "nnacl_c/infer/max_min_grad_infer.h" #include "nnacl_c/arithmetic_parameter.h" -namespace mindspore { +namespace mindspore::lite { class MaxMinGradInferTest : public mindspore::CommonTest { public: @@ -81,4 +81,4 @@ TEST_F(MaxMinGradInferTest, MaxMinGradInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/mfcc_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/mfcc_infer_test.cc index 7efce63b..4ae44c39 100644 --- a/mindspore-lite/test/ut/nnacl/infer/mfcc_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/mfcc_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/mfcc_infer.h" -namespace mindspore { +namespace mindspore::lite { class MfccInferTest : public mindspore::CommonTest { public: @@ -58,4 +58,4 @@ TEST_F(MfccInferTest, MfccInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/nllloss_grad_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/nllloss_grad_infer_test.cc index 0c0480b8..6ff03819 100644 --- a/mindspore-lite/test/ut/nnacl/infer/nllloss_grad_infer_test.cc +++ 
b/mindspore-lite/test/ut/nnacl/infer/nllloss_grad_infer_test.cc @@ -17,7 +17,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/nllloss_grad_infer.h" -namespace mindspore { +namespace mindspore::lite { class TestNLLLossGradInfer : public mindspore::CommonTest { public: TestNLLLossGradInfer() {} @@ -107,4 +107,4 @@ TEST_F(TestNLLLossGradInfer, ReductionMean) { CheckResults(ret, param, outputs); NLLLossGradInferReleaseResources(param, inputs, outputs); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/nllloss_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/nllloss_infer_test.cc index 416d2299..2023ed48 100644 --- a/mindspore-lite/test/ut/nnacl/infer/nllloss_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/nllloss_infer_test.cc @@ -17,7 +17,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/nllloss_infer.h" -namespace mindspore { +namespace mindspore::lite { class TestNLLLossInfer : public mindspore::CommonTest { public: TestNLLLossInfer() {} @@ -95,4 +95,4 @@ TEST_F(TestNLLLossInfer, ReductionMean) { ASSERT_EQ(outputs[1]->shape_size_, 0); NLLLossInferReleaseResources(param, inputs, outputs); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/one_hot_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/one_hot_infer_test.cc index 0752eafd..3d389181 100644 --- a/mindspore-lite/test/ut/nnacl/infer/one_hot_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/one_hot_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/one_hot_infer.h" -namespace mindspore { +namespace mindspore::lite { class OneHotInferTest : public mindspore::CommonTest { public: @@ -57,4 +57,4 @@ TEST_F(OneHotInferTest, OneHotInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/pad_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/pad_infer_test.cc index b66b40a5..e1304989 100644 --- a/mindspore-lite/test/ut/nnacl/infer/pad_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/pad_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/pad_infer.h" -namespace mindspore { +namespace mindspore::lite { class PadInferTest : public mindspore::CommonTest { public: @@ -185,4 +185,4 @@ TEST_F(PadInferTest, PadInferTest4) { delete outputs[i]; } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/pooling_grad_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/pooling_grad_infer_test.cc index bb776ea3..c7817bc7 100644 --- a/mindspore-lite/test/ut/nnacl/infer/pooling_grad_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/pooling_grad_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/pooling_grad_infer.h" -namespace mindspore { +namespace mindspore::lite { class PoolingGradInferTest : public mindspore::CommonTest { public: @@ -65,4 +65,4 @@ TEST_F(PoolingGradInferTest, PoolingGradInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/pooling_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/pooling_infer_test.cc index 9f4d72d5..481b9e7b 100644 --- a/mindspore-lite/test/ut/nnacl/infer/pooling_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/pooling_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/pooling_infer.h" -namespace mindspore { +namespace 
mindspore::lite { class PoolingInferTest : public mindspore::CommonTest { public: @@ -267,4 +267,4 @@ TEST_F(PoolingInferTest, PoolingInferTest5) { delete outputs[i]; } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/power_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/power_infer_test.cc index 0047b71d..0fb07a1d 100644 --- a/mindspore-lite/test/ut/nnacl/infer/power_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/power_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/power_infer.h" -namespace mindspore { +namespace mindspore::lite { class PowerInferTest : public mindspore::CommonTest { public: @@ -109,4 +109,4 @@ TEST_F(PowerInferTest, PowerInferTest2) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/quant_dtype_cast_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/quant_dtype_cast_infer_test.cc index 7c99fb15..e7f35641 100644 --- a/mindspore-lite/test/ut/nnacl/infer/quant_dtype_cast_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/quant_dtype_cast_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/quant_dtype_cast_infer.h" -namespace mindspore { +namespace mindspore::lite { class QuantDtypeCastInferTest : public mindspore::CommonTest { public: @@ -52,4 +52,4 @@ TEST_F(QuantDtypeCastInferTest, QuantDtypeCastInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/random_standard_normal_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/random_standard_normal_infer_test.cc index c1c6e56e..d05328b0 100644 --- a/mindspore-lite/test/ut/nnacl/infer/random_standard_normal_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/random_standard_normal_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/random_standard_normal_infer.h" -namespace mindspore { +namespace mindspore::lite { class RandomStandardNormalInferTest : public mindspore::CommonTest { public: @@ -55,4 +55,4 @@ TEST_F(RandomStandardNormalInferTest, RandomStandardNormalInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/range_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/range_infer_test.cc index 8f40763d..4d24aab4 100644 --- a/mindspore-lite/test/ut/nnacl/infer/range_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/range_infer_test.cc @@ -17,7 +17,7 @@ #include "nnacl_c/infer/range_infer.h" #include "nnacl_c/range_parameter.h" -namespace mindspore { +namespace mindspore::lite { class RangeInferTest : public mindspore::CommonTest { public: @@ -134,4 +134,4 @@ TEST_F(RangeInferTest, RangeInferTest2) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/rank_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/rank_infer_test.cc index c2c99e47..0fac0d86 100644 --- a/mindspore-lite/test/ut/nnacl/infer/rank_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/rank_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/rank_infer.h" -namespace mindspore { +namespace mindspore::lite { class RankInferTest : public mindspore::CommonTest { public: @@ -49,4 +49,4 @@ TEST_F(RankInferTest, RankInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/reduce_infer_test.cc 
b/mindspore-lite/test/ut/nnacl/infer/reduce_infer_test.cc index a5bf6ed7..ec67d9d8 100644 --- a/mindspore-lite/test/ut/nnacl/infer/reduce_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/reduce_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/reduce_infer.h" -namespace mindspore { +namespace mindspore::lite { class ReduceInferTest : public mindspore::CommonTest { public: @@ -189,4 +189,4 @@ TEST_F(ReduceInferTest, ReduceInferTest4) { delete outputs[i]; } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/reshape_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/reshape_infer_test.cc index 47b4be80..6d8cf813 100644 --- a/mindspore-lite/test/ut/nnacl/infer/reshape_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/reshape_infer_test.cc @@ -17,7 +17,7 @@ #include "nnacl_c/infer/reshape_infer.h" #include "nnacl_c/reshape_parameter.h" -namespace mindspore { +namespace mindspore::lite { class ReshapeInferTest : public mindspore::CommonTest { public: @@ -330,4 +330,4 @@ TEST_F(ReshapeInferTest, ReshapeInferTest9) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/resize_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/resize_infer_test.cc index 4aaf0a75..9f8260e9 100644 --- a/mindspore-lite/test/ut/nnacl/infer/resize_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/resize_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/resize_infer.h" -namespace mindspore { +namespace mindspore::lite { class ResizeInferTest : public mindspore::CommonTest { public: @@ -166,4 +166,4 @@ TEST_F(ResizeInferTest, ResizeInferTest3) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/rfft_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/rfft_infer_test.cc index ce630250..e86097fd 100644 --- a/mindspore-lite/test/ut/nnacl/infer/rfft_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/rfft_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/rfft_infer.h" -namespace mindspore { +namespace mindspore::lite { class RfftInferTest : public mindspore::CommonTest { public: @@ -51,4 +51,4 @@ TEST_F(RfftInferTest, RfftInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/roi_pooling_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/roi_pooling_infer_test.cc index ee8cc2a7..01bcd26a 100644 --- a/mindspore-lite/test/ut/nnacl/infer/roi_pooling_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/roi_pooling_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/roi_pooling_infer.h" -namespace mindspore { +namespace mindspore::lite { class ROIPoolingInferTest : public mindspore::CommonTest { public: @@ -58,4 +58,4 @@ TEST_F(ROIPoolingInferTest, ROIPoolingInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/scatter_nd_add_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/scatter_nd_add_infer_test.cc index 5d3813c8..cd3ee3a8 100644 --- a/mindspore-lite/test/ut/nnacl/infer/scatter_nd_add_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/scatter_nd_add_infer_test.cc @@ -18,7 +18,7 @@ #include "nnacl_c/scatter_nd_parameter.h" #include "nnacl_c/infer/scatter_nd_update_infer.h" -namespace mindspore { +namespace mindspore::lite { class 
TestScatterNdAddInfer : public mindspore::CommonTest { public: TestScatterNdAddInfer() {} @@ -77,4 +77,4 @@ TEST_F(TestScatterNdAddInfer, FourDims) { ASSERT_EQ(outputs[0]->shape_[3], 6); ScatterNdAddInferReleaseResources(param, inputs, outputs); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/scatter_nd_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/scatter_nd_infer_test.cc index 3600d37d..af42a537 100644 --- a/mindspore-lite/test/ut/nnacl/infer/scatter_nd_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/scatter_nd_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/scatter_nd_infer.h" -namespace mindspore { +namespace mindspore::lite { class ScatterNdInferTest : public mindspore::CommonTest { public: @@ -57,4 +57,4 @@ TEST_F(ScatterNdInferTest, ScatterNdInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/select_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/select_infer_test.cc index 67f74a02..3507fc55 100644 --- a/mindspore-lite/test/ut/nnacl/infer/select_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/select_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/select_infer.h" -namespace mindspore { +namespace mindspore::lite { class SelectInferTest : public mindspore::CommonTest { public: @@ -234,4 +234,4 @@ TEST_F(SelectInferTest, SelectInferTest3) { delete outputs[i]; } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/sgd_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/sgd_infer_test.cc index 340bc8bf..810c21d2 100644 --- a/mindspore-lite/test/ut/nnacl/infer/sgd_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/sgd_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/sgd_infer.h" -namespace mindspore { +namespace mindspore::lite { class SgdInferTest : public mindspore::CommonTest { public: @@ -62,4 +62,4 @@ TEST_F(SgdInferTest, SgdInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/shape_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/shape_infer_test.cc index 920f589d..106d2cb7 100644 --- a/mindspore-lite/test/ut/nnacl/infer/shape_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/shape_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/shape_infer.h" -namespace mindspore { +namespace mindspore::lite { class ShapeInferTest : public mindspore::CommonTest { public: @@ -47,4 +47,4 @@ TEST_F(ShapeInferTest, ShapeInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/size_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/size_infer_test.cc index 2325f9f8..1b23209d 100644 --- a/mindspore-lite/test/ut/nnacl/infer/size_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/size_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/size_infer.h" -namespace mindspore { +namespace mindspore::lite { class SizeInferTest : public mindspore::CommonTest { public: @@ -51,4 +51,4 @@ TEST_F(SizeInferTest, SizeInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/skip_gram_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/skip_gram_infer_test.cc index 9669e060..50356891 100644 --- 
a/mindspore-lite/test/ut/nnacl/infer/skip_gram_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/skip_gram_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/string/skip_gram_infer.h" -namespace mindspore { +namespace mindspore::lite { class SkipGramInferTest : public mindspore::CommonTest { public: @@ -47,4 +47,4 @@ TEST_F(SkipGramInferTest, SkipGramInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/slice_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/slice_infer_test.cc index 087afc9c..87816a1d 100644 --- a/mindspore-lite/test/ut/nnacl/infer/slice_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/slice_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/slice_infer.h" -namespace mindspore { +namespace mindspore::lite { class SliceInferTest : public mindspore::CommonTest { public: @@ -202,4 +202,4 @@ TEST_F(SliceInferTest, SliceInferTest3) { delete outputs[i]; } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/softmax_cross_entropy_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/softmax_cross_entropy_infer_test.cc index b1cb2170..2b37921e 100644 --- a/mindspore-lite/test/ut/nnacl/infer/softmax_cross_entropy_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/softmax_cross_entropy_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/softmax_cross_entropy_infer.h" -namespace mindspore { +namespace mindspore::lite { class SoftmaxCrossEntropyInferTest : public mindspore::CommonTest { public: @@ -58,4 +58,4 @@ TEST_F(SoftmaxCrossEntropyInferTest, SoftmaxCrossEntropyInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/softmax_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/softmax_infer_test.cc index 0b440079..248742eb 100644 --- a/mindspore-lite/test/ut/nnacl/infer/softmax_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/softmax_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/softmax_infer.h" -namespace mindspore { +namespace mindspore::lite { class SoftmaxInferTest : public mindspore::CommonTest { public: @@ -52,4 +52,4 @@ TEST_F(SoftmaxInferTest, SoftmaxInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/space_to_batch_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/space_to_batch_infer_test.cc index 4786e258..622f341f 100644 --- a/mindspore-lite/test/ut/nnacl/infer/space_to_batch_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/space_to_batch_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/space_to_batch_infer.h" -namespace mindspore { +namespace mindspore::lite { class SpaceToBatchInferTest : public mindspore::CommonTest { public: @@ -171,4 +171,4 @@ TEST_F(SpaceToBatchInferTest, SpaceToBatchInferTest3) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/space_to_batch_nd_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/space_to_batch_nd_infer_test.cc index 0a160e43..5f84daa2 100644 --- a/mindspore-lite/test/ut/nnacl/infer/space_to_batch_nd_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/space_to_batch_nd_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/space_to_batch_nd_infer.h" -namespace 
mindspore { +namespace mindspore::lite { class SpaceToBatchNdInferTest : public mindspore::CommonTest { public: @@ -172,4 +172,4 @@ TEST_F(SpaceToBatchNdInferTest, SpaceToBatchNdInferTest3) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/space_to_depth_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/space_to_depth_infer_test.cc index ea69414f..669400c3 100644 --- a/mindspore-lite/test/ut/nnacl/infer/space_to_depth_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/space_to_depth_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/space_to_depth_infer.h" -namespace mindspore { +namespace mindspore::lite { class SpaceToDepthInferTest : public mindspore::CommonTest { public: @@ -85,4 +85,4 @@ TEST_F(SpaceToDepthInferTest, SpaceToDepthInferTest1) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/sparse_to_dense_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/sparse_to_dense_infer_test.cc index 76bf4f90..4bbebeee 100644 --- a/mindspore-lite/test/ut/nnacl/infer/sparse_to_dense_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/sparse_to_dense_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/sparse_to_dense_infer.h" -namespace mindspore { +namespace mindspore::lite { class SparseToDenseInferTest : public mindspore::CommonTest { public: @@ -55,4 +55,4 @@ TEST_F(SparseToDenseInferTest, SparseToDenseInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/split_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/split_infer_test.cc index f755188d..add3a376 100644 --- a/mindspore-lite/test/ut/nnacl/infer/split_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/split_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/split_infer.h" -namespace mindspore { +namespace mindspore::lite { class SplitInferTest : public mindspore::CommonTest { public: @@ -216,4 +216,4 @@ TEST_F(SplitInferTest, SplitInferTest4) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/squeeze_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/squeeze_infer_test.cc index 5b181563..770ae9e1 100644 --- a/mindspore-lite/test/ut/nnacl/infer/squeeze_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/squeeze_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/squeeze_infer.h" -namespace mindspore { +namespace mindspore::lite { class SqueezeInferTest : public mindspore::CommonTest { public: @@ -144,4 +144,4 @@ TEST_F(SqueezeInferTest, SqueezeInferTest3) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/stack_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/stack_infer_test.cc index 65435538..393c998c 100644 --- a/mindspore-lite/test/ut/nnacl/infer/stack_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/stack_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/stack_infer.h" -namespace mindspore { +namespace mindspore::lite { class StackInferTest : public mindspore::CommonTest { public: @@ -89,4 +89,4 @@ TEST_F(StackInferTest, StackInferTest1) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/strided_slice_infer_test.cc 
b/mindspore-lite/test/ut/nnacl/infer/strided_slice_infer_test.cc index 6a4ab89c..6c13f02c 100644 --- a/mindspore-lite/test/ut/nnacl/infer/strided_slice_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/strided_slice_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/strided_slice_infer.h" -namespace mindspore { +namespace mindspore::lite { class StridedSliceInferTest : public mindspore::CommonTest { public: @@ -304,4 +304,4 @@ TEST_F(StridedSliceInferTest, StridedSliceInferTest6) { delete parameter; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc index eafde941..ddfff364 100644 --- a/mindspore-lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc @@ -17,7 +17,7 @@ #include "src/common/tensor_util.h" #include "nnacl_c/infer/control/tensorlist_fromtensor_infer.h" -namespace mindspore { +namespace mindspore::lite { class TensorlistFromtensorInferTest : public mindspore::CommonTest { public: @@ -69,4 +69,4 @@ TEST_F(TensorlistFromtensorInferTest, TensorlistFromtensorInferTest0) { free(out); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc index 2a235823..77474e48 100644 --- a/mindspore-lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc @@ -17,7 +17,7 @@ #include "src/common/tensor_util.h" #include "nnacl_c/infer/control/tensorlist_getitem_infer.h" -namespace mindspore { +namespace mindspore::lite { class TensorlistGetItemInferTest : public mindspore::CommonTest { public: @@ -86,4 +86,4 @@ TEST_F(TensorlistGetItemInferTest, TensorlistGetItemInferTest0) { // retest mergeshape -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc index fe826238..62dd6292 100644 --- a/mindspore-lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc @@ -17,7 +17,7 @@ #include "src/common/tensor_util.h" #include "nnacl_c/infer/control/tensorlist_reserve_infer.h" -namespace mindspore { +namespace mindspore::lite { class TensorlistReserveInferTest : public mindspore::CommonTest { public: @@ -66,4 +66,4 @@ TEST_F(TensorlistReserveInferTest, TensorlistReserveInferTest0) { free(out); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc index 80a600d2..63dc785d 100644 --- a/mindspore-lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc @@ -17,7 +17,7 @@ #include "src/common/tensor_util.h" #include "nnacl_c/infer/control/tensorlist_setitem_infer.h" -namespace mindspore { +namespace mindspore::lite { class TensorlistSetItemInferTest : public mindspore::CommonTest { public: @@ -108,4 +108,4 @@ TEST_F(TensorlistSetItemInferTest, TensorlistSetItemInferTest0) { // retest mergeshape -} // namespace mindspore +} // namespace mindspore::lite 
diff --git a/mindspore-lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc index 8095670e..9fe13893 100644 --- a/mindspore-lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/control/tensorlist_stack_infer.h" -namespace mindspore { +namespace mindspore::lite { class TensorlistStackInferTest : public mindspore::CommonTest { public: @@ -86,4 +86,4 @@ TEST_F(TensorlistStackInferTest, TensorlistStackInferTest0) { // retest mergeshape -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/tile_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/tile_infer_test.cc index 316c9f3f..49c1c01a 100644 --- a/mindspore-lite/test/ut/nnacl/infer/tile_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/tile_infer_test.cc @@ -18,7 +18,7 @@ #include "nnacl_c/base/tile_base.h" #include "nnacl_c/tile_parameter.h" -namespace mindspore { +namespace mindspore::lite { class TileInferTest : public mindspore::CommonTest { public: @@ -98,4 +98,4 @@ TEST_F(TileInferTest, TileInferTest1) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/topk_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/topk_infer_test.cc index 55e98524..45297424 100644 --- a/mindspore-lite/test/ut/nnacl/infer/topk_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/topk_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/topk_infer.h" -namespace mindspore { +namespace mindspore::lite { class TopKInferTest : public mindspore::CommonTest { public: @@ -95,4 +95,4 @@ TEST_F(TopKInferTest, TopKInferInputsSize2) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/transpose_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/transpose_infer_test.cc index 4096d1cd..932a8617 100644 --- a/mindspore-lite/test/ut/nnacl/infer/transpose_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/transpose_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/transpose_infer.h" -namespace mindspore { +namespace mindspore::lite { class TransposeInferTest : public mindspore::CommonTest { public: @@ -57,4 +57,4 @@ TEST_F(TransposeInferTest, TransposeInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/unique_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/unique_infer_test.cc index 534921c9..9a73d3ab 100644 --- a/mindspore-lite/test/ut/nnacl/infer/unique_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/unique_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/unique_infer.h" -namespace mindspore { +namespace mindspore::lite { class UniqueInferTest : public mindspore::CommonTest { public: @@ -52,4 +52,4 @@ TEST_F(UniqueInferTest, UniqueInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/unsorted_segment_sum_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/unsorted_segment_sum_infer_test.cc index 4f14be0f..9020441a 100644 --- a/mindspore-lite/test/ut/nnacl/infer/unsorted_segment_sum_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/unsorted_segment_sum_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" 
#include "nnacl_c/infer/unsorted_segment_sum_infer.h" -namespace mindspore { +namespace mindspore::lite { class UnsortedSegmentSumInferTest : public mindspore::CommonTest { public: @@ -57,4 +57,4 @@ TEST_F(UnsortedSegmentSumInferTest, UnsortedSegmentSumInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/unsqueeze_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/unsqueeze_infer_test.cc index bcf3bd19..10f75e03 100644 --- a/mindspore-lite/test/ut/nnacl/infer/unsqueeze_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/unsqueeze_infer_test.cc @@ -17,7 +17,7 @@ #include "nnacl_c/infer/unsqueeze_infer.h" #include "nnacl_c/unsqueeze_parameter.h" -namespace mindspore { +namespace mindspore::lite { class UnsqueezeInferTest : public mindspore::CommonTest { public: @@ -196,4 +196,4 @@ TEST_F(UnsqueezeInferTest, UnsqueezeInferTest5) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/unstack_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/unstack_infer_test.cc index c5bcb70c..2a61f073 100644 --- a/mindspore-lite/test/ut/nnacl/infer/unstack_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/unstack_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/unstack_infer.h" -namespace mindspore { +namespace mindspore::lite { class UnstackInferTest : public mindspore::CommonTest { public: @@ -54,4 +54,4 @@ TEST_F(UnstackInferTest, UnstackInferTest0) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/infer/where_infer_test.cc b/mindspore-lite/test/ut/nnacl/infer/where_infer_test.cc index f24572b8..0f03d20e 100644 --- a/mindspore-lite/test/ut/nnacl/infer/where_infer_test.cc +++ b/mindspore-lite/test/ut/nnacl/infer/where_infer_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/infer/where_infer.h" -namespace mindspore { +namespace mindspore::lite { class WhereInferTest : public mindspore::CommonTest { public: @@ -84,4 +84,4 @@ TEST_F(WhereInferTest, WhereInferTest1) { delete outputs[i]; } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/int8/quant_dtype_cast_int8_test.cc b/mindspore-lite/test/ut/nnacl/int8/quant_dtype_cast_int8_test.cc index 18c2074e..35dfd89a 100644 --- a/mindspore-lite/test/ut/nnacl/int8/quant_dtype_cast_int8_test.cc +++ b/mindspore-lite/test/ut/nnacl/int8/quant_dtype_cast_int8_test.cc @@ -21,7 +21,7 @@ #include "nnacl_c/fp16/quant_dtype_cast_fp16.h" #endif -namespace mindspore { +namespace mindspore::lite { class QuantCastInt8Test : public mindspore::CommonTest { public: @@ -355,4 +355,4 @@ TEST_F(QuantCastInt8Test, Fp32Int8Inf) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/nnacl/kernel/cast_test.cc b/mindspore-lite/test/ut/nnacl/kernel/cast_test.cc index ef8db4b3..7ebf7490 100644 --- a/mindspore-lite/test/ut/nnacl/kernel/cast_test.cc +++ b/mindspore-lite/test/ut/nnacl/kernel/cast_test.cc @@ -20,7 +20,7 @@ #include "nnacl_c/base/cast_base.h" #include "nnacl_c/kernel/cast.h" -namespace mindspore { +namespace mindspore::lite { class CastTest : public mindspore::CommonTest { public: CastTest() {} @@ -248,4 +248,4 @@ TEST_F(CastTest, Int32ToFp16Test) { } } #endif -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/api/context_c_test.cc b/mindspore-lite/test/ut/src/api/context_c_test.cc index 
18ee04e4..3b744d7f 100644 --- a/mindspore-lite/test/ut/src/api/context_c_test.cc +++ b/mindspore-lite/test/ut/src/api/context_c_test.cc @@ -16,7 +16,7 @@ #include "include/c_api/context_c.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class ContextCTest : public mindspore::CommonTest { public: ContextCTest() {} @@ -70,4 +70,4 @@ TEST_F(ContextCTest, common_test) { MSContextAddDeviceInfo(context, npu_device_info); MSContextDestroy(&context); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/api/generic_api_test.cc b/mindspore-lite/test/ut/src/api/generic_api_test.cc index 4c52f851..78239387 100644 --- a/mindspore-lite/test/ut/src/api/generic_api_test.cc +++ b/mindspore-lite/test/ut/src/api/generic_api_test.cc @@ -20,7 +20,7 @@ #include "src/runtime/cxx_api/converters.h" #include "src/common/context_util.h" -namespace mindspore { +namespace mindspore::lite { class GenericApiTest : public mindspore::CommonTest { public: GenericApiTest() {} @@ -67,4 +67,4 @@ TEST_F(GenericApiTest, TestConvertInnerContextToContext) { delete inner_ctx; delete ctx; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/api/model_c_test.cc b/mindspore-lite/test/ut/src/api/model_c_test.cc index 47f30e86..e6911776 100644 --- a/mindspore-lite/test/ut/src/api/model_c_test.cc +++ b/mindspore-lite/test/ut/src/api/model_c_test.cc @@ -21,7 +21,7 @@ #include "src/common/file_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class ModelCApiTest : public mindspore::CommonTest { public: ModelCApiTest() {} @@ -518,4 +518,4 @@ TEST(ModelCApiTest, TrainExportWeightsMicro) { ASSERT_EQ(status, kMSStatusSuccess); MSModelDestroy(&model); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/api/model_group_test.cc b/mindspore-lite/test/ut/src/api/model_group_test.cc index e2364da9..9dd29c7e 100644 --- a/mindspore-lite/test/ut/src/api/model_group_test.cc +++ b/mindspore-lite/test/ut/src/api/model_group_test.cc @@ -19,7 +19,7 @@ #include "include/api/model_group.h" #include "src/common/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { const char in_data_path[] = "./mobilenetv2.ms.bin"; const char model_path_1[] = "./mobilenetv2.ms"; @@ -221,4 +221,4 @@ TEST_F(ModelGroupTest, ModelGroupPredict) { status = model_2->Predict(inputs_2, &outputs_2); ASSERT_EQ(status, kSuccess); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/api/model_parallel_runner_test.cc b/mindspore-lite/test/ut/src/api/model_parallel_runner_test.cc index 5c2954bc..0cba0114 100644 --- a/mindspore-lite/test/ut/src/api/model_parallel_runner_test.cc +++ b/mindspore-lite/test/ut/src/api/model_parallel_runner_test.cc @@ -18,7 +18,7 @@ #include "common/common_test.h" #include "src/common/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { const char in_data_path[] = "./mobilenetv2.ms.bin"; const char model_path[] = "./mobilenetv2.ms"; @@ -240,4 +240,4 @@ TEST_F(ModelParallelRunnerTest, RunnerInitByBuf) { tensor.SetData(nullptr); } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/api/tensor_c_test.cc b/mindspore-lite/test/ut/src/api/tensor_c_test.cc index 4b58abaa..ed177da6 100644 --- a/mindspore-lite/test/ut/src/api/tensor_c_test.cc +++ b/mindspore-lite/test/ut/src/api/tensor_c_test.cc @@ -16,7 +16,7 
@@ #include "include/c_api/tensor_c.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TensorCTest : public mindspore::CommonTest { public: TensorCTest() {} @@ -88,4 +88,4 @@ TEST_F(TensorCTest, common_test) { MSTensorDestroy(&tensor); MSTensorDestroy(&clone); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/dynamic_library_loader_test.cc b/mindspore-lite/test/ut/src/dynamic_library_loader_test.cc index f9e7c2e2..b8e4b3f0 100644 --- a/mindspore-lite/test/ut/src/dynamic_library_loader_test.cc +++ b/mindspore-lite/test/ut/src/dynamic_library_loader_test.cc @@ -1,43 +1,43 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/common/dynamic_library_loader.h" -#include "common/common_test.h" - -namespace mindspore { -class LoaderUtilTest : public mindspore::CommonTest { - public: - LoaderUtilTest() {} -}; - -/* - in file add.c, the code is: - int add(int a, int b) {return a + b;} - use this command to generate so file: - gcc add.cc -fPIC -shared -o libadd.so - use this command to see the symbol table: - nm -D libadd.so -*/ -TEST_F(LoaderUtilTest, TestAdd) { - lite::DynamicLibraryLoader loader; - loader.Open("./libadd.so"); - int (*add)(int a, int b); - add = (int (*)(int, int))loader.GetFunc("add"); - int res = add(7, 8); - loader.Close(); - ASSERT_EQ(15, res); -} -} // namespace mindspore +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/common/dynamic_library_loader.h" +#include "common/common_test.h" + +namespace mindspore::lite { +class LoaderUtilTest : public mindspore::CommonTest { + public: + LoaderUtilTest() {} +}; + +/* + in file add.c, the code is: + int add(int a, int b) {return a + b;} + use this command to generate so file: + gcc add.cc -fPIC -shared -o libadd.so + use this command to see the symbol table: + nm -D libadd.so +*/ +TEST_F(LoaderUtilTest, TestAdd) { + lite::DynamicLibraryLoader loader; + loader.Open("./libadd.so"); + int (*add)(int a, int b); + add = (int (*)(int, int))loader.GetFunc("add"); + int res = add(7, 8); + loader.Close(); + ASSERT_EQ(15, res); +} +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/graph_test.cc b/mindspore-lite/test/ut/src/graph_test.cc index 20b8f792..dfba2dd4 100644 --- a/mindspore-lite/test/ut/src/graph_test.cc +++ b/mindspore-lite/test/ut/src/graph_test.cc @@ -26,7 +26,7 @@ #include "src/litert/executor.h" #include "schema/inner/anf_ir_generated.h" -namespace mindspore { +namespace mindspore::lite { class TestLiteInference : public mindspore::CommonTest { public: TestLiteInference() {} @@ -80,4 +80,4 @@ char *ReadModelFile(const char *file, size_t *size) { return buf.release(); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/infer_test.cc b/mindspore-lite/test/ut/src/infer_test.cc index e19ff5cf..aad732a3 100644 --- a/mindspore-lite/test/ut/src/infer_test.cc +++ b/mindspore-lite/test/ut/src/infer_test.cc @@ -23,7 +23,7 @@ #include "src/litert/lite_session.h" #include "src/common/file_utils.h" -namespace mindspore { +namespace mindspore::lite { class InferTest : public mindspore::CommonTest { public: InferTest() {} @@ -248,4 +248,4 @@ TEST_F(InferTest, TestModel) { MS_LOG(INFO) << "Passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/lite_mindrt_test.cc b/mindspore-lite/test/ut/src/lite_mindrt_test.cc index e7dbf6db..23878ad8 100644 --- a/mindspore-lite/test/ut/src/lite_mindrt_test.cc +++ b/mindspore-lite/test/ut/src/lite_mindrt_test.cc @@ -23,7 +23,7 @@ #include "common/common_test.h" #include "schema/model_generated.h" -namespace mindspore { +namespace mindspore::lite { class LiteMindRtTest : public mindspore::CommonTest { public: LiteMindRtTest() {} @@ -82,4 +82,4 @@ TEST_F(LiteMindRtTest, ActorThreadPoolTest) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/registry/registry_custom_op_test.cc b/mindspore-lite/test/ut/src/registry/registry_custom_op_test.cc index 81b13063..8d408c3f 100644 --- a/mindspore-lite/test/ut/src/registry/registry_custom_op_test.cc +++ b/mindspore-lite/test/ut/src/registry/registry_custom_op_test.cc @@ -29,7 +29,7 @@ using mindspore::kernel::Kernel; using mindspore::kernel::KernelInterface; using mindspore::schema::PrimitiveType_AddFusion; -namespace mindspore { +namespace mindspore::lite { namespace { const char *const kKeyName = "test_key"; const char *const kTestData = "test_data"; @@ -228,4 +228,4 @@ TEST_F(TestRegistryCustomOp, TestCustomAdd) { ASSERT_EQ(TestData::GetInstance()->data_, kTestData); MS_LOG(INFO) << "Register add op test pass."; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/registry/registry_gpu_custom_op_test.cc b/mindspore-lite/test/ut/src/registry/registry_gpu_custom_op_test.cc index 18d308d7..bff06c06 100644 --- 
a/mindspore-lite/test/ut/src/registry/registry_gpu_custom_op_test.cc +++ b/mindspore-lite/test/ut/src/registry/registry_gpu_custom_op_test.cc @@ -34,7 +34,7 @@ using mindspore::schema::PrimitiveType_AddFusion; #define UP_ROUND(x, y) (((x) + (y) - (1)) / (y) * (y)) #define UP_DIV(x, y) (((x) + (y) - (1)) / (y)) -namespace mindspore { +namespace mindspore::lite { namespace { constexpr int kDimIndex2 = 2; constexpr int kDimIndex3 = 3; @@ -538,4 +538,4 @@ TEST_F(TestGPURegistryCustomOp, TestGPUCustomAdd) { ASSERT_EQ(30.0f, outData[0]); MS_LOG(INFO) << "Register add op test pass."; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/registry/registry_test.cc b/mindspore-lite/test/ut/src/registry/registry_test.cc index e6e2cdc8..18fff55e 100644 --- a/mindspore-lite/test/ut/src/registry/registry_test.cc +++ b/mindspore-lite/test/ut/src/registry/registry_test.cc @@ -28,7 +28,7 @@ using mindspore::kernel::Kernel; using mindspore::kernel::KernelInterface; using mindspore::schema::PrimitiveType_AddFusion; -namespace mindspore { +namespace mindspore::lite { class TestCustomAdd : public Kernel { public: TestCustomAdd(const std::vector &inputs, const std::vector &outputs, @@ -187,4 +187,4 @@ TEST_F(TestRegistry, TestAdd) { MS_LOG(INFO) << "Register add op test pass."; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/dynamic_mem_manager_test.cc b/mindspore-lite/test/ut/src/runtime/dynamic_mem_manager_test.cc index 5128c833..90c6809a 100644 --- a/mindspore-lite/test/ut/src/runtime/dynamic_mem_manager_test.cc +++ b/mindspore-lite/test/ut/src/runtime/dynamic_mem_manager_test.cc @@ -21,7 +21,7 @@ #include "src/extendrt/numa_adapter.h" #undef private -namespace mindspore { +namespace mindspore::lite { namespace { constexpr size_t kAllocUnitSize = 256 * 1024 * 1024; static constexpr size_t kMinimumAllocUnitSize = 64 * 1024 * 1024; @@ -539,6 +539,6 @@ TEST_F(DynamicMemManagerTest, test_set_ref_count) { ref_count = mem->DecRefCount(data, 1); ASSERT_EQ(ref_count, 2); } -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc index 9c2fd933..1d4229b8 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/common/pack_tests.cc @@ -26,7 +26,7 @@ #include "nnacl_c/fp16/conv_fp16.h" #endif -namespace mindspore { +namespace mindspore::lite { class TestPack : public mindspore::CommonTest { public: TestPack() {} @@ -174,4 +174,4 @@ TEST_F(TestPack, PackInputFp16) { } #endif -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc index c51aa18c..872b8b6f 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc @@ -19,7 +19,7 @@ #include "nnacl_c/strided_slice_parameter.h" #include "nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class TestStridedSlice : public mindspore::CommonTest { public: TestStridedSlice() {} @@ -317,4 +317,4 @@ TEST_F(TestStridedSlice, StridedSliceInt8) { delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/test/ut/src/runtime/kernel/arm/cxx_api/model_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/cxx_api/model_test.cc index 373402f8..4f70b0cc 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/cxx_api/model_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/cxx_api/model_test.cc @@ -20,7 +20,7 @@ #include "include/api/serialization.h" #include "include/api/metrics/accuracy.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr int32_t kNumThreads = 2; constexpr int NUM_OF_CLASSES = 10; @@ -250,4 +250,4 @@ TEST_F(TestCxxApiLiteModel, set_weights_FAILURE) { delete (tensor); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/cxx_api/serialization_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/cxx_api/serialization_test.cc index fa64020d..0b14f485 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/cxx_api/serialization_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/cxx_api/serialization_test.cc @@ -19,7 +19,7 @@ #include "common/common_test.h" #include "include/api/serialization.h" -namespace mindspore { +namespace mindspore::lite { class TestCxxApiLiteSerialization : public mindspore::CommonTest { public: TestCxxApiLiteSerialization() = default; @@ -83,4 +83,4 @@ TEST_F(TestCxxApiLiteSerialization, test_export_to_buffer) { ASSERT_EQ(result, 0); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp16_grad/activation_grad_fp16_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp16_grad/activation_grad_fp16_test.cc index 75f20b16..d2dd17a4 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp16_grad/activation_grad_fp16_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp16_grad/activation_grad_fp16_test.cc @@ -23,7 +23,7 @@ #include "src/common/file_utils.h" #include "nnacl_c/fp16_grad/activation_grad_fp16.h" -namespace mindspore { +namespace mindspore::lite { class TestActGradFp16 : public mindspore::CommonTest { public: TestActGradFp16() {} @@ -142,4 +142,4 @@ TEST_F(TestActGradFp16, SigmoidGradFp16) { MS_LOG(INFO) << "SigmoidGradFp16 passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad_tests.cc index 3d5a767d..dff7ea3c 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad_tests.cc @@ -23,7 +23,7 @@ #include "src/common/file_utils.h" #include "nnacl_c/fp16_grad/arithmetic_self_grad.h" -namespace mindspore { +namespace mindspore::lite { class TestArithmeticSelfGradFp16 : public mindspore::CommonTest { public: TestArithmeticSelfGradFp16() {} @@ -84,4 +84,4 @@ TEST_F(TestArithmeticSelfGradFp16, LogGradFp16) { MS_LOG(INFO) << "LogGradFp16 passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32-sparsity/matmul_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32-sparsity/matmul_fp32_tests.cc index a19d152c..c9cfe7a4 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32-sparsity/matmul_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32-sparsity/matmul_fp32_tests.cc @@ -27,7 +27,7 @@ #include 
"src/litert/kernel/cpu/fp32/matmul_fp32.h" #include "src/litert/kernel/cpu/fp32_sparse/matmul_sparse_fp32.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::Tensor; class TestSPMMFp32 : public mindspore::CommonTest { @@ -427,4 +427,4 @@ TEST_F(TestSPMMFp32Performance, SparsityMatmul) { ret = this->TestSparseMVM(); EXPECT_EQ(ret, true); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc index 2cab2292..5942f28e 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc @@ -20,7 +20,7 @@ #include "src/executor/kernel_exec.h" #include "nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class TestActivationFp32 : public mindspore::CommonTest { public: @@ -292,4 +292,4 @@ TEST_F(TestActivationFp32, Softplus) { output0_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc index a89a7338..ec5bbd67 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/batch_to_space_fp32_test.cc @@ -19,7 +19,7 @@ #include "nnacl_c/batch_to_space_parameter.h" #include "nnacl_c/common_func.h" -namespace mindspore { +namespace mindspore::lite { class TestBatchToSpaceFp32 : public mindspore::CommonTest { public: @@ -191,4 +191,4 @@ TEST_F(TestBatchToSpaceFp32, BatchToSpaceTest_crop_4) { ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001)); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc index 2943341b..e0b183bc 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc @@ -19,7 +19,7 @@ #include "nnacl_c/batchnorm_parameter.h" #include "nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class TestBatchnormFp32 : public mindspore::CommonTest { public: TestBatchnormFp32() {} @@ -154,4 +154,4 @@ TEST_F(TestBatchnormFp32, easyTest) { kernel->set_parameter(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc index 2ad43dfe..f38ff426 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc @@ -19,7 +19,7 @@ #include "src/litert/tensor_category.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { class TestConstantOfShapeFp32 : public mindspore::CommonTest { public: TestConstantOfShapeFp32() {} @@ -70,4 +70,4 @@ TEST_F(TestConstantOfShapeFp32, Simple) { for (auto t : inputs_) delete t; for (auto t : outputs_) delete t; } -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc index 6c6420df..27d8a9b2 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/conv1x1_fp32_tests.cc @@ -18,7 +18,7 @@ #include "nnacl_c/matmul_parameter.h" #include "src/litert/kernel/cpu/fp32/convolution_1x1_fp32.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::Tensor; class TestConv1x1Fp32 : public mindspore::CommonTest { @@ -135,4 +135,4 @@ TEST_F(TestConv1x1Fp32, Input1x1PrePack4) { EXPECT_EQ(0, CompareOutputData(out, correct, 54)); delete conv_param; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc index 655b8a38..79e0602b 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class TestConvolutionDwFp32 : public mindspore::CommonTest { public: TestConvolutionDwFp32() {} @@ -154,4 +154,4 @@ TEST_F(TestConvolutionDwFp32, ConvDwFp32Accuracy) { delete ctx; MS_LOG(INFO) << "TestConvolutionDwFp32 accuracy passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/convolution_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/convolution_fp32_tests.cc index 934f7d7b..686a350d 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/convolution_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/convolution_fp32_tests.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel/cpu/base/convolution_base.h" #include "nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class TestConvolutionFp32 : public mindspore::CommonTest { public: TestConvolutionFp32() {} @@ -139,4 +139,4 @@ TEST_F(TestConvolutionFp32, conv1) { delete kernel; delete ctx; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc index 83d510c4..35a13855 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc @@ -19,7 +19,7 @@ #include "src/litert/lite_kernel.h" #include "nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class CropTestFp32 : public mindspore::CommonTest { public: CropTestFp32() = default; @@ -305,4 +305,4 @@ TEST_F(CropTestFp32, CropTest11) { kernel->set_parameter(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/cumsum_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/cumsum_tests.cc index 0fa93471..1384223c 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/cumsum_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/cumsum_tests.cc @@ -19,7 +19,7 @@ #include "nnacl_c/cumsum_parameter.h" #include 
"src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestCumsum : public mindspore::CommonTest { public: TestCumsum() {} @@ -388,4 +388,4 @@ TEST_F(TestCumsum, TestIntRank2Thread4) { delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc index 16a9c598..f525e45f 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc @@ -23,7 +23,7 @@ #include "src/litert/tensor_category.h" #include "nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class TestDeConvolutionFp32 : public mindspore::CommonTest { public: TestDeConvolutionFp32() = default; @@ -673,4 +673,4 @@ TEST_F(TestDeConvolutionFp32, DeConvTest3) { for (auto t : outputs_) delete t; free(correct); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/depth_to_space_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/depth_to_space_fp32_test.cc index 901dcd06..e2c1fde6 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/depth_to_space_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/depth_to_space_fp32_test.cc @@ -20,7 +20,7 @@ #include "nnacl_c/depth_to_space_parameter.h" #include "nnacl_c/kernel/depth_to_space.h" -namespace mindspore { +namespace mindspore::lite { class DepthToSpaceTestFp32 : public mindspore::CommonTest { public: @@ -84,4 +84,4 @@ TEST_F(DepthToSpaceTestFp32, DepthToSpaceTest3) { std::cout << "\n"; ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001)); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc index 891bb937..c9dc975a 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc @@ -21,7 +21,7 @@ #include "src/executor/kernel_exec.h" #include "src/common/file_utils.h" -namespace mindspore { +namespace mindspore::lite { class TestDetectionPostProcessFp32 : public mindspore::CommonTest { public: TestDetectionPostProcessFp32() {} @@ -162,4 +162,4 @@ TEST_F(TestDetectionPostProcessFp32, Fast) { for (auto t : inputs_) delete t; for (auto t : outputs_) delete t; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc index 92fc83ca..741c8d3a 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc @@ -22,7 +22,7 @@ #include "src/common/log_adapter.h" #include "src/litert/tensor_category.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::Tensor; class TestEmbeddingLookupFp32 : public mindspore::CommonTest { @@ -92,4 +92,4 @@ TEST_F(TestEmbeddingLookupFp32, ElTest) { delete ctx; } -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc index 1c9308f5..c4ecdbf7 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc @@ -25,7 +25,7 @@ #include "src/litert/kernel_registry.h" #include "src/litert/kernel/cpu/nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::Tensor; class TestFcFp32 : public mindspore::CommonTest { @@ -265,4 +265,4 @@ TEST_F(TestFcFp32, FcTest4_Vec2Batch) { DestroyTensors(inputs); DestroyTensors(outputs); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc index bca1f672..548245f2 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc @@ -19,7 +19,7 @@ #include "src/litert/kernel_registry.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { class TestL2NormFp32 : public mindspore::CommonTest { public: TestL2NormFp32() = default; @@ -159,4 +159,4 @@ TEST_F(TestL2NormFp32, Test4) { ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol_)); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/logicalor_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/logicalor_fp32_test.cc index 58cbcc24..72040294 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/logicalor_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/logicalor_fp32_test.cc @@ -20,7 +20,7 @@ #include "src/litert/kernel_registry.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { class TestLogicalOrFp32 : public mindspore::CommonTest { public: @@ -37,4 +37,4 @@ TEST_F(TestLogicalOrFp32, LogicalOrFp32) { ASSERT_EQ(output[i], expect[i]); } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc index deca3585..9eb6db1a 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc @@ -23,7 +23,7 @@ #include "src/executor/kernel_exec.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr int kSparseType = 1; @@ -170,4 +170,4 @@ TEST_F(TestLshProjectionFp32, Sparse3DInputs) { out_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc index 9441e03b..097b5f98 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc @@ -20,7 +20,7 @@ #include "nnacl_c/fp32/lstm_fp32.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class LstmFp32 : public mindspore::CommonTest { public: 
LstmFp32() = default; @@ -337,4 +337,4 @@ TEST_F(LstmFp32, LstmBackwardFp32Accuracy) { MS_LOG(INFO) << "LstmFp32 backward accuracy passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc index b7dcc712..f60f646a 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc @@ -23,7 +23,7 @@ #include "src/litert/tensor_category.h" #include "src/litert/kernel/cpu/nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class TestMatMulFp32 : public mindspore::CommonTest { public: TestMatMulFp32() {} @@ -373,4 +373,4 @@ TEST_F(TestMatMulFp32, batch) { for (auto t : inputs_) delete t; for (auto t : outputs_) delete t; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/nllloss_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/nllloss_fp32_test.cc index 6fdbba8f..b8c109f6 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/nllloss_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/nllloss_fp32_test.cc @@ -21,7 +21,7 @@ #include "nnacl_c/nllloss_parameter.h" #include "nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class TestNLLLossFp32 : public mindspore::CommonTest { public: TestNLLLossFp32() {} @@ -161,4 +161,4 @@ TEST_F(TestNLLLossFp32, ReductionMean) { ASSERT_EQ(0, CompareOutputData(reinterpret_cast(outputs[1]->MutableData()), expect_total_weight, 1, 0.0001)); NLLLossReleaseResources(ctx, kernel, inputs, outputs); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc index 6edf25e5..9d7eba32 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc @@ -20,7 +20,7 @@ #include "src/litert/kernel_registry.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { class TestNMSFp32 : public mindspore::CommonTest { public: TestNMSFp32() = default; @@ -117,4 +117,4 @@ TEST_F(TestNMSFp32, TestCase1) { CompareOutputData(reinterpret_cast(out_tensor_.data()), expect.data(), output_size, err_tol_)); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/one_hot_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/one_hot_fp32_test.cc index cb9ec5c8..8458f084 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/one_hot_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/one_hot_fp32_test.cc @@ -22,7 +22,7 @@ #include "schema/ops_generated.h" #include "nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class TestOneHotFp32 : public mindspore::CommonTest { public: @@ -144,4 +144,4 @@ TEST_F(TestOneHotFp32, Test3) { ASSERT_EQ(0, CompareOutputData(out_data, expect.data(), 36, err_tol)); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc 
b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc index 7a942a14..4fcb23fa 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc @@ -21,7 +21,7 @@ #include "nnacl/nnacl_manager.h" #include "nnacl_c/pow_parameter.h" -namespace mindspore { +namespace mindspore::lite { class TestPowerFp32 : public mindspore::CommonTest { public: TestPowerFp32() {} @@ -105,4 +105,4 @@ TEST_F(TestPowerFp32, Broadcast) { delete kernel; delete ctx; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/ragged_range_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/ragged_range_fp32_tests.cc index 2d536075..3dd1849d 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/ragged_range_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/ragged_range_fp32_tests.cc @@ -21,7 +21,7 @@ #include "src/executor/kernel_exec.h" #include "nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class TestRaggedRangeFp32 : public mindspore::CommonTest { public: TestRaggedRangeFp32() {} @@ -133,4 +133,4 @@ TEST_F(TestRaggedRangeFp32, 002) { kernel->set_parameter(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc index ca6857e4..06ce111f 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc @@ -31,7 +31,7 @@ using mindspore::schema::ReduceMode_ReduceProd; using mindspore::schema::ReduceMode_ReduceSum; using mindspore::schema::ReduceMode_ReduceSumSquare; -namespace mindspore { +namespace mindspore::lite { class TestReduceFp32 : public mindspore::CommonTest { public: @@ -481,4 +481,4 @@ TEST_F(TestReduceFp32, ASum) { int output_size = 32; ASSERT_EQ(0, CompareOutputData(out, correct, output_size, err_tol)); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc index 0250d9fb..8a07d69c 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc @@ -21,7 +21,7 @@ #include "nnacl_c/resize_parameter.h" #include "schema/ops_generated.h" -namespace mindspore { +namespace mindspore::lite { class TestResizeBilinearFp32 : public mindspore::CommonTest { public: @@ -409,4 +409,4 @@ TEST_F(TestResizeBilinearFp32, ResizeBilinearTest16) { ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol)); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc index 800a586e..a0ed0c6a 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc @@ -18,7 +18,7 @@ #include "nnacl_c/resize_parameter.h" #include "src/litert/kernel_registry.h" -namespace 
mindspore { +namespace mindspore::lite { class TestResizeNearestNeighborFp32 : public mindspore::CommonTest { public: @@ -551,4 +551,4 @@ TEST_F(TestResizeNearestNeighborFp32, ResizeNearestNeighborTest16) { ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol)); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc index 0a1f041a..33a03e56 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc @@ -20,7 +20,7 @@ #include "nnacl_c/fp32/reverse_sequence_fp32.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestReverseSequenceFp32 : public mindspore::CommonTest { public: TestReverseSequenceFp32() {} @@ -166,4 +166,4 @@ TEST_F(TestReverseSequenceFp32, BatchSeqNotAdjacent) { out_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc index 2acb64a3..ba14f7c8 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc @@ -19,7 +19,7 @@ #include "src/litert/kernel_registry.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { class TestROIPoolingFp32 : public mindspore::CommonTest { public: TestROIPoolingFp32() {} @@ -75,4 +75,4 @@ TEST_F(TestROIPoolingFp32, Simple) { for (auto t : inputs_) delete t; for (auto t : outputs_) delete t; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc index 5b16bb68..8233db6e 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc @@ -27,7 +27,7 @@ using mindspore::schema::ActivationType; using mindspore::schema::ActivationType_NO_ACTIVATION; using mindspore::schema::ActivationType_RELU; using mindspore::schema::ActivationType_RELU6; -namespace mindspore { +namespace mindspore::lite { class TestScaleFp32 : public mindspore::CommonTest { public: @@ -164,4 +164,4 @@ TEST_F(TestScaleFp32, ScaleRelu6) { ASSERT_EQ(0, CompareOutputData(out_data, expect.data(), 12, err_tol)); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/scatter_nd_add_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/scatter_nd_add_fp32_test.cc index 47cf6f3e..03a50a73 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/scatter_nd_add_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/scatter_nd_add_fp32_test.cc @@ -18,7 +18,7 @@ #include "src/litert/kernel_registry.h" #include "nnacl_c/scatter_nd_parameter.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::Tensor; class TestScatterNdAdd : public mindspore::CommonTest { @@ -136,4 +136,4 @@ TEST_F(TestScatterNdAdd, Fp32ThreeDims) { DestroyTensors(inputs); DestroyTensors(outputs); } -} // namespace 
mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/scatter_nd_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/scatter_nd_fp32_tests.cc index b171b5b5..fb7b3e7a 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/scatter_nd_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/scatter_nd_fp32_tests.cc @@ -17,7 +17,7 @@ #include "src/litert/kernel_registry.h" #include "nnacl_c/scatter_nd_parameter.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::Tensor; class TestScatterNdFp32 : public mindspore::CommonTest { @@ -90,4 +90,4 @@ TEST_F(TestScatterNdFp32, ScatterNdUpdate) { DestroyTensors(inputs); DestroyTensors(outputs); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc index aaa12c6e..58dede0b 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc @@ -23,7 +23,7 @@ #include "src/common/log_adapter.h" #include "src/common/string_utils.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::StringPack; using mindspore::lite::Tensor; @@ -84,4 +84,4 @@ TEST_F(TestSkipGramFp32, ElTest) { delete ctx; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/softmax_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/softmax_tests.cc index 57a287d4..cdffca19 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/softmax_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/softmax_tests.cc @@ -19,7 +19,7 @@ #include "nnacl_c/softmax_parameter.h" #include "src/litert/kernel/cpu/nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class TestSoftmaxFp32 : public mindspore::CommonTest { public: TestSoftmaxFp32() {} @@ -59,4 +59,4 @@ TEST_F(TestSoftmaxFp32, 001) { kernel->set_parameter(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc index 88e177b0..aa78d429 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/space_to_batch_fp32_tests.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel_registry.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { class SpaceToBatchTestFp32 : public mindspore::CommonTest { public: @@ -86,4 +86,4 @@ TEST_F(SpaceToBatchTestFp32, SpaceToBatchTest6) { DoSpaceToBatch(input.data(), out, ¶m, 0); ASSERT_EQ(0, CompareOutputData(out, expect_out.data(), kOutSize, 0.000001)); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc index 1ebbe4f8..6376681a 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc @@ -23,7 +23,7 @@ #include "src/litert/kernel_registry.h" #include "src/executor/kernel_exec.h" -namespace 
mindspore { +namespace mindspore::lite { class SpaceToDepthTestFp32 : public mindspore::CommonTest { public: @@ -103,4 +103,4 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) { delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc index 08704f99..db0b7b22 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc @@ -23,7 +23,7 @@ #include "src/executor/kernel_exec.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { class TestSparseToDenseFp32 : public mindspore::CommonTest { public: @@ -217,4 +217,4 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test5) { DestroyTensors(inputs); DestroyTensors(outputs); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/stack_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/stack_fp32_test.cc index 758e7f3f..b93a2e8b 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/stack_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/stack_fp32_test.cc @@ -16,7 +16,7 @@ #include "common/common_test.h" #include "nnacl_c/base/stack_base.h" -namespace mindspore { +namespace mindspore::lite { class StackTestFp32 : public mindspore::CommonTest { public: StackTestFp32() = default; @@ -42,4 +42,4 @@ TEST_F(StackTestFp32, StackTest1) { ASSERT_EQ(0, CompareOutputData(output, expect_out, kOutSize, 0.000001)); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc index fde4a2bf..50a271e2 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc @@ -20,7 +20,7 @@ #include "src/litert/kernel_registry.h" #include "src/litert/kernel/cpu/nnacl/nnacl_manager.h" -namespace mindspore { +namespace mindspore::lite { class TestTileFp32 : public mindspore::CommonTest { public: TestTileFp32() {} @@ -157,4 +157,4 @@ TEST_F(TestTileFp32, SimpleTile2) { out_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc index f6852855..01c49f93 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc @@ -20,7 +20,7 @@ #include "nnacl_c/fp32/topk_fp32.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestTopKFp32 : public mindspore::CommonTest { public: TestTopKFp32() {} @@ -68,4 +68,4 @@ TEST_F(TestTopKFp32, TopK) { out_tensor1.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc index 87210921..543f405e 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc +++ 
b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc @@ -23,7 +23,7 @@ #include "nnacl/nnacl_manager.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { class TestTransposeFp32 : public mindspore::CommonTest { public: @@ -271,4 +271,4 @@ TEST_F(TestTransposeFp32, TransposeFp32_test5) { /* 1x2x3x2x2 */ delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/uniform_real_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/uniform_real_fp32_test.cc index be300dea..a865e052 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/uniform_real_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/uniform_real_fp32_test.cc @@ -19,7 +19,7 @@ #include "nnacl_c/random_parameter.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestUniformRealFp32 : public mindspore::CommonTest { public: TestUniformRealFp32() {} @@ -67,4 +67,4 @@ TEST_F(TestUniformRealFp32, UniformReal) { out_tensor0.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc index 047d16ac..bf581aff 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc @@ -20,7 +20,7 @@ #include "nnacl_c/fp32/unique_fp32.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestUniqueFp32 : public mindspore::CommonTest { public: TestUniqueFp32() {} @@ -71,4 +71,4 @@ TEST_F(TestUniqueFp32, Unique) { out_tensor1.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc index 4b57c33c..5d65a6be 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc @@ -20,7 +20,7 @@ #include "nnacl_c/base/unstack_base.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestUnstackFp32 : public mindspore::CommonTest { public: TestUnstackFp32() {} @@ -127,4 +127,4 @@ TEST_F(TestUnstackFp32, Unstack2) { out_tensor2.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc index 1d0ef614..60fc2dc6 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/activation_grad_fp32_tests.cc @@ -27,7 +27,7 @@ #include "src/litert/kernel/cpu/fp32_grad/activation_grad.h" #include "nnacl_c/fp32_grad/activation_grad_fp32.h" -namespace mindspore { +namespace mindspore::lite { class TestActGradFp32 : public mindspore::CommonTest { public: TestActGradFp32() {} @@ -397,4 +397,4 @@ TEST_F(TestActGradFp32, hsigmoidGradFp32) { MS_LOG(INFO) << "hsigmoidGradFp32 passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc index 72265769..24730b28 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc @@ -25,7 +25,7 @@ #include "src/litert/kernel/cpu/fp32_grad/arithmetic_grad.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { #ifdef PRIMITIVE_WRITEABLE ArithmeticParameter *PopulateArithmeticParameter(mindspore::schema::PrimitiveType type, @@ -887,4 +887,4 @@ TEST_F(TestArithmeticGradFp32, TestMaximumGradBroadcastFp32) { MS_LOG(INFO) << "TestMaximumGradBroadcastFp32 passed"; } #endif -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc index f4b3ff7e..bc6a3672 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel/cpu/fp32_grad/bias_grad.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestBiasGradFp32 : public mindspore::CommonTest { public: @@ -136,4 +136,4 @@ TEST_F(TestBiasGradFp32, BiasGrad2DFp32) { MS_LOG(INFO) << "BiasGradFp32 passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc index 642f5e20..d0a8307a 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc @@ -24,7 +24,7 @@ #include "src/litert/kernel_registry.h" #include "nnacl_c/batchnorm_parameter.h" -namespace mindspore { +namespace mindspore::lite { constexpr int kSize3 = 3; constexpr int kSize7 = 7; class TestBNGradFp32 : public mindspore::CommonTest { @@ -224,4 +224,4 @@ TEST_F(TestBNGradFp32, BNTtrainFp32) { delete x_tensor; delete kernel_obj; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc index 669032a9..02b348ec 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc @@ -26,7 +26,7 @@ #include "nnacl_c/conv_parameter.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestConvolutionGradFp32 : public mindspore::CommonTest { public: TestConvolutionGradFp32() {} @@ -822,4 +822,4 @@ TEST_F(TestConvolutionGradFp32, ConvGroup2Dilation2Stride2) { MS_LOG(INFO) << "TestConvolutionGradFp32 Filter Grad passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc index cd1ae7e5..0c581e1d 100644 --- 
a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc @@ -23,7 +23,7 @@ #include "nnacl_c/conv_parameter.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestDeConvolutionGradFp32 : public mindspore::CommonTest { public: TestDeConvolutionGradFp32() {} @@ -667,4 +667,4 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group12Stride2FilterGrad) { MS_LOG(INFO) << "TestDeConvolutionGradFp32 Filter Grad passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc index c07dbb46..5d252665 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc @@ -33,7 +33,7 @@ #include "src/litert/kernel/cpu/fp32_grad/convolution.h" using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { class NetworkTest : public mindspore::CommonTest { public: NetworkTest() {} @@ -118,4 +118,4 @@ TEST_F(NetworkTest, mobileface_net) { delete model; delete session; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/nllloss_grad_fp32_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/nllloss_grad_fp32_test.cc index 3985c94a..3f4303ff 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/nllloss_grad_fp32_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/nllloss_grad_fp32_test.cc @@ -21,7 +21,7 @@ #include "src/executor/kernel_exec.h" #include "src/litert/tensor_category.h" -namespace mindspore { +namespace mindspore::lite { class TestNLLLossGradFp32 : public mindspore::CommonTest { public: TestNLLLossGradFp32() {} @@ -144,4 +144,4 @@ TEST_F(TestNLLLossGradFp32, ReductionMean) { ASSERT_EQ(0, CompareOutputData(reinterpret_cast(outputs[0]->MutableData()), expect_loss, 15, 0.0001)); NLLLossGradReleaseResources(ctx, kernel, param, inputs, outputs); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc index 473caf06..ea88b5b1 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc @@ -25,7 +25,7 @@ #include "src/litert/kernel/cpu/fp32_grad/pooling_grad.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestPoolingGradFp32 : public mindspore::CommonTest { public: TestPoolingGradFp32() {} @@ -687,4 +687,4 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride3Fp32) { delete kernel; MS_LOG(INFO) << "MaxPoolGradStride3Fp32 Filter Grad passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc index 13b32ff8..2b7ad7df 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc +++ 
b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel/cpu/fp32_grad/sparse_softmax_cross_entropy_with_logits.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestSoftmaxCrossEntropyFp32 : public mindspore::CommonTest { public: @@ -117,4 +117,4 @@ TEST_F(TestSoftmaxCrossEntropyFp32, SoftmaxCrossEntropyFp32) { MS_LOG(INFO) << "SoftmaxCrossEntropyFp32 passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_grad_fp32_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_grad_fp32_tests.cc index b04e75c2..dd1a4556 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_grad_fp32_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_grad_fp32_tests.cc @@ -25,7 +25,7 @@ #include "nnacl_c/fp32_grad/softmax_grad.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestSoftmaxGradFp32 : public mindspore::CommonTest { public: TestSoftmaxGradFp32() {} @@ -373,4 +373,4 @@ TEST_F(TestSoftmaxGradFp32, SoftmaxGradAxisMinus1) { MS_LOG(INFO) << "SoftmaxGradAxisMinus1 passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc index 9d255de0..92f5d6bd 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc @@ -20,7 +20,7 @@ #include "common/common_test.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestQuantizedAdd : public mindspore::CommonTest { public: TestQuantizedAdd() {} @@ -74,4 +74,4 @@ TEST_F(TestQuantizedAdd, Add) { out_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc index 8eb3c104..59864ba4 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc @@ -22,7 +22,7 @@ #include "src/executor/kernel_exec.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { class TestArithmeticSelfInt8 : public mindspore::CommonTest { public: @@ -1037,4 +1037,4 @@ TEST_F(TestArithmeticSelfInt8, logical_not_quant0_thread2) { delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc index 72abe37c..534d087a 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc @@ -22,7 +22,7 @@ #include "src/litert/kernel_registry.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { class TestBatchnormInt8 : public mindspore::CommonTest { public: TestBatchnormInt8() {} @@ -167,4 +167,4 @@ TEST_F(TestBatchnormInt8, BNTest) { kernel->set_parameter(nullptr); delete kernel; } -} // namespace 
mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc index ca25ffd2..db7104e9 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc @@ -23,7 +23,7 @@ #include "src/executor/kernel_exec.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { class TestConcatInt8 : public mindspore::CommonTest { public: @@ -257,4 +257,4 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2_quant1) { delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc index 0ba9e2a7..bf1eace0 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc @@ -22,7 +22,7 @@ #include "src/litert/kernel/cpu/int8/convolution_1x1_int8.h" #include "src/litert/tensor_category.h" -namespace mindspore { +namespace mindspore::lite { using lite::Tensor; class TestConv1x1Int8 : public mindspore::CommonTest { public: @@ -284,4 +284,4 @@ TEST_F(TestConv1x1Int8, Conv1x1Int8Test2) { for (auto t : outputs_) delete t; free(correct); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc index a301fba6..9c28587f 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc @@ -23,7 +23,7 @@ #include "src/executor/kernel_exec.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { class TestCropInt8 : public mindspore::CommonTest { public: @@ -710,4 +710,4 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread3) { delete ctx; delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc index 80c3d256..5283e4ff 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc @@ -27,7 +27,7 @@ using mindspore::lite::DeviceType; -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::LiteQuantParam; using mindspore::lite::Tensor; class TestDeconvInt8 : public mindspore::CommonTest { @@ -339,4 +339,4 @@ TEST_F(TestDeconvInt8, DeConvInt8Test1) { for (auto t : outputs_) delete t; free(correct); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc index 21e9c732..3ee56078 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc @@ -22,7 +22,7 @@ #include "src/litert/kernel_registry.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { using lite::Tensor; class TestFcInt8 : public 
mindspore::CommonTest { public: @@ -153,4 +153,4 @@ TEST_F(TestFcInt8, fctest1) { for (auto t : outputs) delete t; delete[] out; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc index c3bbcf72..8c1ad210 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc @@ -22,7 +22,7 @@ #include "src/litert/kernel_registry.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { class TestGatherNdInt8 : public mindspore::CommonTest { public: TestGatherNdInt8() {} @@ -102,4 +102,4 @@ TEST_F(TestGatherNdInt8, GatherNdTest) { delete kernel; MS_LOG(INFO) << "TestGatherNd accuracy passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc index f2171064..847bf3ba 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel_registry.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { class TestGatherInt8 : public mindspore::CommonTest { public: TestGatherInt8() {} @@ -99,4 +99,4 @@ TEST_F(TestGatherInt8, GatherTest) { delete kernel; MS_LOG(INFO) << "TestGather_int8 accuracy passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc index ef03b3f5..53b7c009 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc @@ -22,7 +22,7 @@ #include "src/litert/kernel/cpu/int8/hswish_int8.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestHSwishInt8 : public mindspore::CommonTest { public: TestHSwishInt8() {} @@ -73,4 +73,4 @@ TEST_F(TestHSwishInt8, HSwish) { out_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/l2_norm_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/l2_norm_int8_tests.cc index b8ea47a5..5acbd170 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/l2_norm_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/l2_norm_int8_tests.cc @@ -20,7 +20,7 @@ #include "src/litert/kernel_registry.h" #include "nnacl_c/l2_norm_parameter.h" -namespace mindspore { +namespace mindspore::lite { class TestL2NormInt8 : public mindspore::CommonTest { public: TestL2NormInt8() {} @@ -116,4 +116,4 @@ TEST_F(TestL2NormInt8, norm2) { out_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc index fde391e5..61a65295 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc @@ -24,7 
+24,7 @@ #include "src/litert/kernel_registry.h" #include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { class TestMatmulInt8 : public mindspore::CommonTest { public: TestMatmulInt8() {} @@ -260,4 +260,4 @@ TEST_F(TestMatmulInt8, mmtest2) { delete[] out; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc index e42c1122..c35129ad 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc @@ -24,7 +24,7 @@ #include "src/tensor.h" #include "nnacl_c/arithmetic_parameter.h" -namespace mindspore { +namespace mindspore::lite { class TestMulInt8 : public mindspore::CommonTest { public: @@ -401,4 +401,4 @@ TEST_F(TestMulInt8, test) { delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc index 1d94b775..081e8492 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc @@ -23,7 +23,7 @@ #include "nnacl_c/pad_parameter.h" #include "src/litert/kernel/cpu/int8/pad_int8.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::LiteQuantParam; using mindspore::lite::Tensor; class TestPadInt8 : public mindspore::CommonTest { @@ -243,4 +243,4 @@ TEST_F(TestPadInt8, PadInt8TestInit4) { delete ctx; free(correct); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc index 3899c22a..fb1aeb41 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc @@ -22,7 +22,7 @@ #include "nnacl_c/pow_parameter.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestPowerInt8 : public mindspore::CommonTest { public: @@ -83,4 +83,4 @@ TEST_F(TestPowerInt8, normal) { kernel->set_parameter(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc index 0e55eb45..d54c159b 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc @@ -23,7 +23,7 @@ #include "src/executor/kernel_exec.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { class TestPreluInt8 : public mindspore::CommonTest { public: @@ -94,4 +94,4 @@ TEST_F(TestPreluInt8, prelu_1) { delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc index 576225b6..082ad11b 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc @@ -23,7 +23,7 @@ #include "src/litert/kernel_registry.h" 
#include "src/executor/kernel_exec.h" -namespace mindspore { +namespace mindspore::lite { class QuantDTypeCastTestFp32 : public mindspore::CommonTest { public: @@ -132,4 +132,4 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest2) { ASSERT_EQ(0, CompareOutputData(output.data(), expect_out, out_size, 0.000001)); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc index c9e038eb..fbaa81c3 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel_registry.h" #include "nnacl_c/fp32/reduce_fp32.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::LiteQuantParam; using mindspore::lite::Tensor; using mindspore::schema::ReduceMode; @@ -364,4 +364,4 @@ TEST_F(TestReduceInt8, SumSquare2Axis) { CompareOutputInt8(output_data, correct, output_size, err_tol_); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc index bda47a03..d9052b89 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel/cpu/int8/relux_int8.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestReluXInt8 : public mindspore::CommonTest { public: TestReluXInt8() {} @@ -120,4 +120,4 @@ TEST_F(TestReluXInt8, Relu6) { out_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc index a765e82f..ef4282b3 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc @@ -23,7 +23,7 @@ #include "src/executor/kernel_exec.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { class TestReshapeInt8 : public mindspore::CommonTest { public: @@ -156,4 +156,4 @@ TEST_F(TestReshapeInt8, reshape_quant1_thread2) { delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc index c79ef65b..8f92317e 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel_registry.h" #include "nnacl_c/int8/resize_int8.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::LiteQuantParam; using mindspore::lite::Tensor; @@ -179,4 +179,4 @@ TEST_F(TestResizeBilinearInt8, Bilinear3) { CompareOutputInt8(output_data, expect, 160, err_percent_); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc 
b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc index c664a889..84bfe532 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel_registry.h" #include "nnacl_c/int8/resize_int8.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::LiteQuantParam; using mindspore::lite::Tensor; @@ -210,4 +210,4 @@ TEST_F(TestResizeNearestNeighborInt8, NearestNeighbor4) { CompareOutputInt8(output_data, expect, out_element_num, err_percent_); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/scale_int8.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/scale_int8.cc index 2df0fbc2..47c20002 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/scale_int8.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/scale_int8.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel_registry.h" #include "nnacl_c/int8/scale_int8.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::LiteQuantParam; using mindspore::lite::Tensor; @@ -177,4 +177,4 @@ TEST_F(TestScaleInt8, scale3) { err_tol_ = 0.01; CompareOutputInt8(out_data, correct, output_size, err_tol_); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc index 148ea7b5..92862f4c 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc @@ -20,7 +20,7 @@ #include "nnacl_c/fp32/activation_fp32.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestSigmoidInt8 : public mindspore::CommonTest { public: TestSigmoidInt8() {} @@ -71,4 +71,4 @@ TEST_F(TestSigmoidInt8, Sigmoid) { out_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc index ce6646c6..bfee47e6 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel/cpu/int8/slice_int8.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestSliceInt8 : public mindspore::CommonTest { public: TestSliceInt8() {} @@ -448,4 +448,4 @@ TEST_F(TestSliceInt8, Slice4Thread) { size_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc index 3893a239..810d7f43 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc @@ -22,7 +22,7 @@ #include "nnacl_c/softmax_parameter.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestSoftmaxInt8 : public mindspore::CommonTest { public: @@ -87,4 +87,4 @@ 
TEST_F(TestSoftmaxInt8, SoftmaxInt8) { output0_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/space_to_batch_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/space_to_batch_int8_tests.cc index d3ad1f6b..7b990adc 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/space_to_batch_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/space_to_batch_int8_tests.cc @@ -18,7 +18,7 @@ #include "nnacl_c/fp32/space_to_batch_fp32.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class SpaceToBatchTestInt8 : public mindspore::CommonTest { public: SpaceToBatchTestInt8() {} @@ -58,4 +58,4 @@ TEST_F(SpaceToBatchTestInt8, test1) { out_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc index 7fe33d32..35dea06d 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc @@ -23,7 +23,7 @@ #include "src/executor/kernel_exec.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { class TestSplitInt8 : public mindspore::CommonTest { public: @@ -309,4 +309,4 @@ TEST_F(TestSplitInt8, Split_quant1_thread2_num) { delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc index 51b6102b..0e8409cb 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc @@ -23,7 +23,7 @@ #include "src/executor/kernel_exec.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { class TestSqueezeInt8 : public mindspore::CommonTest { public: @@ -92,4 +92,4 @@ TEST_F(TestSqueezeInt8, Squeeze_1d_axis0_offset0_quant0_thread2) { delete ctx; delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc index 0b680862..89379e81 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc @@ -21,7 +21,7 @@ #include "src/litert/kernel/cpu/int8/sub_int8.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class TestSubInt8 : public mindspore::CommonTest { public: TestSubInt8() {} @@ -127,4 +127,4 @@ TEST_F(TestSubInt8, SubInt8T2) { out_tensor.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc index 173e0136..a7b9b1b8 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc @@ -21,7 +21,7 @@ #include "nnacl_c/fp32/topk_fp32.h" #include "src/litert/kernel_registry.h" -namespace mindspore { +namespace mindspore::lite { class 
TestTopKInt8 : public mindspore::CommonTest { public: TestTopKInt8() {} @@ -66,4 +66,4 @@ TEST_F(TestTopKInt8, TopK) { out_tensor1.set_data(nullptr); delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc index d509b258..9c3b99e4 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc @@ -22,7 +22,7 @@ #include "src/executor/kernel_exec.h" #include "src/tensor.h" -namespace mindspore { +namespace mindspore::lite { class TestUnsqueezeInt8 : public mindspore::CommonTest { public: @@ -95,4 +95,4 @@ TEST_F(TestUnsqueezeInt8, Unsqueeze_1) { delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/arm/string/normalize.cc b/mindspore-lite/test/ut/src/runtime/kernel/arm/string/normalize.cc index 6644e069..7a88931f 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/arm/string/normalize.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/arm/string/normalize.cc @@ -24,7 +24,7 @@ #include "src/common/log_adapter.h" #include "src/common/string_utils.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::lite::StringPack; using mindspore::lite::Tensor; @@ -89,4 +89,4 @@ TEST_F(TestNormalize, TestSentence) { } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/common_utils_test.cc b/mindspore-lite/test/ut/src/runtime/kernel/common_utils_test.cc index d974dcbe..7119ba6a 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/common_utils_test.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/common_utils_test.cc @@ -18,7 +18,7 @@ #include "common/common_test.h" #include "common/common_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class CommonUtilTest : public mindspore::CommonTest { public: @@ -131,4 +131,4 @@ TEST_F(CommonUtilTest, BucketReduceSparseGradient2) { } } } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel/cuda/batchtospace_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel/cuda/batchtospace_tests.cc index a3d61223..7b383845 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel/cuda/batchtospace_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel/cuda/batchtospace_tests.cc @@ -20,7 +20,7 @@ #include "ut/src/extendrt/kernel/cuda/common.h" #include "nnacl_c/batch_to_space_parameter.h" -namespace mindspore { +namespace mindspore::lite { class CudaTest_BatchToSpace : public CommonTest { public: CudaTest_BatchToSpace() {} @@ -81,4 +81,4 @@ TEST_F(CudaTest_BatchToSpace, basic) { delete out_tensor; delete kernel; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/kernel_executor_tests.cc b/mindspore-lite/test/ut/src/runtime/kernel_executor_tests.cc index bac21f37..421d6620 100644 --- a/mindspore-lite/test/ut/src/runtime/kernel_executor_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/kernel_executor_tests.cc @@ -26,7 +26,7 @@ #include "infer/pad.h" #include "ops/base_operator.h" -namespace mindspore { +namespace mindspore::lite { class KernelExecutorTest : public mindspore::CommonTest { public: KernelExecutorTest(); @@ -771,4 +771,4 @@ TEST_F(KernelExecutorTest, 
TestTranspose) { ASSERT_EQ(0, CompareOutputData(reinterpret_cast(outputs[0].MutableData()), correct, outputs[0].ElementNum(), 0.0001)); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/runtime/runtime_convert_tests.cc b/mindspore-lite/test/ut/src/runtime/runtime_convert_tests.cc index 08bfde65..60e883fc 100644 --- a/mindspore-lite/test/ut/src/runtime/runtime_convert_tests.cc +++ b/mindspore-lite/test/ut/src/runtime/runtime_convert_tests.cc @@ -17,7 +17,7 @@ #include "include/api/model.h" #include "include/api/status.h" -namespace mindspore { +namespace mindspore::lite { class RuntimeConvert : public mindspore::CommonTest { public: RuntimeConvert() = default; @@ -86,4 +86,4 @@ TEST_F(RuntimeConvert, relu3) { ASSERT_LE(fp32_data[2], 3.0); ASSERT_LE(fp32_data[3], 4.0); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/src/utils_test.cc b/mindspore-lite/test/ut/src/utils_test.cc index aca67252..b23cc15b 100644 --- a/mindspore-lite/test/ut/src/utils_test.cc +++ b/mindspore-lite/test/ut/src/utils_test.cc @@ -1,66 +1,66 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include "schema/inner/model_generated.h" -#include "common/common_test.h" -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/executor/kernel_exec.h" -#include "src/litert/kernel_exec_util.h" - -namespace mindspore { -class UtilsTest : public mindspore::CommonTest { - public: - UtilsTest() {} -}; - -TEST_F(UtilsTest, TestSubgraph) { - auto kernel0 = std::make_shared(); - auto kernel1 = std::make_shared(); - auto kernel2 = std::make_shared(); - - auto tensor0 = std::make_shared(); - auto tensor1 = std::make_shared(); - auto tensor2 = std::make_shared(); - auto tensor3 = std::make_shared(); - auto tensor4 = std::make_shared(); - - kernel0->AddOutKernel(kernel1.get()); - kernel1->AddInKernel(kernel0.get()); - kernel1->AddOutKernel(kernel2.get()); - kernel2->AddInKernel(kernel1.get()); - - kernel0->set_in_tensors({tensor0.get(), tensor1.get()}); - kernel0->set_out_tensors({tensor2.get()}); - kernel1->set_in_tensors({tensor2.get()}); - kernel1->set_out_tensors({tensor3.get()}); - kernel2->set_in_tensors({tensor3.get()}); - kernel2->set_out_tensors({tensor4.get()}); - - std::vector kernels = {kernel0.get(), kernel1.get(), kernel2.get()}; - - auto input_kernels = kernel::KernelExecUtil::SubgraphInputNodes(kernels); - ASSERT_EQ(input_kernels.size(), 1); - auto output_kernels = kernel::KernelExecUtil::SubgraphOutputNodes(kernels); - ASSERT_EQ(output_kernels.size(), 1); - auto input_tensors = kernel::KernelExecUtil::SubgraphInputTensors(kernels); - ASSERT_EQ(input_tensors.size(), 2); - auto output_tensors = kernel::KernelExecUtil::SubgraphOutputTensors(kernels); - ASSERT_EQ(output_tensors.size(), 1); -} -} // namespace mindspore +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include "schema/inner/model_generated.h" +#include "common/common_test.h" +#include "include/errorcode.h" +#include "src/common/log_adapter.h" +#include "src/executor/kernel_exec.h" +#include "src/litert/kernel_exec_util.h" + +namespace mindspore::lite { +class UtilsTest : public mindspore::CommonTest { + public: + UtilsTest() {} +}; + +TEST_F(UtilsTest, TestSubgraph) { + auto kernel0 = std::make_shared(); + auto kernel1 = std::make_shared(); + auto kernel2 = std::make_shared(); + + auto tensor0 = std::make_shared(); + auto tensor1 = std::make_shared(); + auto tensor2 = std::make_shared(); + auto tensor3 = std::make_shared(); + auto tensor4 = std::make_shared(); + + kernel0->AddOutKernel(kernel1.get()); + kernel1->AddInKernel(kernel0.get()); + kernel1->AddOutKernel(kernel2.get()); + kernel2->AddInKernel(kernel1.get()); + + kernel0->set_in_tensors({tensor0.get(), tensor1.get()}); + kernel0->set_out_tensors({tensor2.get()}); + kernel1->set_in_tensors({tensor2.get()}); + kernel1->set_out_tensors({tensor3.get()}); + kernel2->set_in_tensors({tensor3.get()}); + kernel2->set_out_tensors({tensor4.get()}); + + std::vector kernels = {kernel0.get(), kernel1.get(), kernel2.get()}; + + auto input_kernels = kernel::KernelExecUtil::SubgraphInputNodes(kernels); + ASSERT_EQ(input_kernels.size(), 1); + auto output_kernels = kernel::KernelExecUtil::SubgraphOutputNodes(kernels); + ASSERT_EQ(output_kernels.size(), 1); + auto input_tensors = kernel::KernelExecUtil::SubgraphInputTensors(kernels); + ASSERT_EQ(input_tensors.size(), 2); + auto output_tensors = kernel::KernelExecUtil::SubgraphOutputTensors(kernels); + ASSERT_EQ(output_tensors.size(), 1); +} +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/activation_mapper_test.cc b/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/activation_mapper_test.cc index f474d073..dd53bce6 100644 --- a/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/activation_mapper_test.cc +++ b/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/activation_mapper_test.cc @@ -42,7 +42,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr int kNumInputIndex0 = 0; constexpr int kNumInputIndex1 = 1; @@ -503,4 +503,4 @@ TEST_F(ActivationMapperTest, TANHNodeMapperTest) { auto attr_size = new_prim->attrs().size(); ASSERT_EQ(attr_size, 1); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/argmax_fusion_mapper_test.cc b/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/argmax_fusion_mapper_test.cc index 8d274ed6..946153d7 100644 --- a/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/argmax_fusion_mapper_test.cc +++ b/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/argmax_fusion_mapper_test.cc @@ -32,7 +32,7 @@ 
#include "mindspore/core/include/ir/dtype/number.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr int kNumInputNum1 = 1; constexpr int kNumInputNum2 = 2; @@ -385,4 +385,4 @@ TEST_F(ArgmaxFusionMapperTest, InitArgmaxFusionNodeWithoutKeepDims) { MS_LOG(INFO) << "PASS"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/argmin_fusion_mapper_test.cc b/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/argmin_fusion_mapper_test.cc index 151240e8..eb5a4b62 100644 --- a/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/argmin_fusion_mapper_test.cc +++ b/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/argmin_fusion_mapper_test.cc @@ -32,7 +32,7 @@ #include "mindspore/core/include/ir/dtype/number.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr int kNumInputNum1 = 1; constexpr int kNumInputNum2 = 2; @@ -308,4 +308,4 @@ TEST_F(ArgminFusionMapperTest, ArgminFusionNodeMapperWithInputSize3) { ASSERT_EQ(cnode->inputs().size(), kNumInputNum3); MS_LOG(INFO) << "PASS"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/arithmetic_mapper_test.cc b/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/arithmetic_mapper_test.cc index 9c91822a..d3c9ccde 100644 --- a/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/arithmetic_mapper_test.cc +++ b/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/arithmetic_mapper_test.cc @@ -39,7 +39,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr int kNumInputSize = 3; } // namespace @@ -500,4 +500,4 @@ TEST_F(ArithmeticMapperTest, TestExpFusion) { auto prim_name = new_prim->name(); ASSERT_EQ(prim_name, "Exp"); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/clip_mapper_test.cc b/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/clip_mapper_test.cc index 1972c8ec..5cf66011 100644 --- a/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/clip_mapper_test.cc +++ b/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/clip_mapper_test.cc @@ -27,7 +27,7 @@ #include "mindapi/ir/tensor.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" -namespace mindspore { +namespace mindspore::lite { constexpr float kNumClipFloatMinValue = -1.0; constexpr float kNumClipFloatMaxValue = 1.0; constexpr int kNumInputMinIndex = 1; @@ -381,4 +381,4 @@ TEST_F(ClipMapperTest, FloatClipNodeWithAttr) { ASSERT_EQ(input3_is_param, true); MS_LOG(INFO) << "PASS"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/fused_batchnorm_mapper_test.cc b/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/fused_batchnorm_mapper_test.cc index 23cba9ab..9f433bcb 100644 --- a/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/fused_batchnorm_mapper_test.cc +++ b/mindspore-lite/test/ut/tools/converter/adapter/acl/mapper/fused_batchnorm_mapper_test.cc @@ -32,7 +32,7 @@ #include "mindspore/core/include/ir/dtype/number.h" #include 
"mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" -namespace mindspore { +namespace mindspore::lite { class FusedBatchNormMapperTest : public mindspore::CommonTest { public: FusedBatchNormMapperTest() = default; @@ -161,4 +161,4 @@ TEST_F(FusedBatchNormMapperTest, InitFusedBatchNormNodeWithInput) { ASSERT_EQ(attr_size, 5); MS_LOG(INFO) << "PASS"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/onnx/onnx_layer_norm_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/onnx/onnx_layer_norm_parser_test.cc index fc832506..01b4d390 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/onnx/onnx_layer_norm_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/onnx/onnx_layer_norm_parser_test.cc @@ -22,7 +22,7 @@ #include "include/registry/converter_context.h" #include "include/registry/node_parser_registry.h" #include "tools/converter/parser/onnx/onnx_node_parser_registry.h" -namespace mindspore { +namespace mindspore::lite { class OnnxLayerNormParserTest : public mindspore::CommonTest { public: OnnxLayerNormParserTest() = default; @@ -127,4 +127,4 @@ TEST_F(OnnxLayerNormParserTest, OnnxLayerNormParserTest2) { auto ret = TestOnnxLayerNormNode2(); ASSERT_EQ(ret, true); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc index 61d2ee8b..5abda3ef 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserRelu : public TestTfliteParser { public: @@ -138,4 +138,4 @@ TEST_F(TestTfliteParserLeakyRelu, AttrValue) { ASSERT_EQ(val.type, schema::PrimitiveType_LeakyRelu); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_addn_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_addn_parser_test.cc index 13f4396c..0a2eebb2 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_addn_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_addn_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserAddN : public TestTfliteParser { public: TestTfliteParserAddN() = default; @@ -31,4 +31,4 @@ TEST_F(TestTfliteParserAddN, OpType) { ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_AddN) << "wrong Op Type"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_argmax_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_argmax_parser_test.cc index dd2f8c15..6e02d8f8 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_argmax_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_argmax_parser_test.cc @@ -17,7 +17,7 @@ #include 
"ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserArgmax : public TestTfliteParser { public: TestTfliteParserArgmax() = default; @@ -40,4 +40,4 @@ TEST_F(TestTfliteParserArgmax, AttrValue) { ASSERT_EQ(val->out_max_value, false); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_argmin_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_argmin_parser_test.cc index e0ac4bed..13818cc7 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_argmin_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_argmin_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserArgmin : public TestTfliteParser { public: TestTfliteParserArgmin() = default; @@ -40,4 +40,4 @@ TEST_F(TestTfliteParserArgmin, AttrValue) { ASSERT_EQ(val->out_max_value, false); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_arithmetic_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_arithmetic_parser_test.cc index b2082a7e..a0fba778 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_arithmetic_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_arithmetic_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { // doubleInputOp class TestTfliteParserAdd : public TestTfliteParser { public: @@ -391,4 +391,4 @@ TEST_F(TestTfliteParserLessEqual, OpType) { ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_LessEqual) << "wrong Op Type"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_batch_to_space_nd_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_batch_to_space_nd_parser_test.cc index 3be3b051..93d9d538 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_batch_to_space_nd_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_batch_to_space_nd_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserBatchToSpaceNd : public TestTfliteParser { public: TestTfliteParserBatchToSpaceNd() = default; @@ -38,4 +38,4 @@ TEST_F(TestTfliteParserBatchToSpaceNd, AttrValue) { ASSERT_EQ(val->block_size, blockShape); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_cast_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_cast_parser_test.cc index 22f4694d..a60b1623 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_cast_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_cast_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace 
mindspore::lite { class TestTfliteParserCast : public TestTfliteParser { public: TestTfliteParserCast() = default; @@ -31,4 +31,4 @@ TEST_F(TestTfliteParserCast, OpType) { ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Cast) << "wrong Op Type"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_concat_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_concat_parser_test.cc index 9bc99104..50b023fb 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_concat_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_concat_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserConcat : public TestTfliteParser { public: TestTfliteParserConcat() = default; @@ -37,4 +37,4 @@ TEST_F(TestTfliteParserConcat, AttrValue) { ASSERT_EQ(val->axis, 1); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_conv_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_conv_parser_test.cc index ca1561c6..e135ac86 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_conv_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_conv_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserConv : public TestTfliteParser { public: TestTfliteParserConv() = default; @@ -46,4 +46,4 @@ TEST_F(TestTfliteParserConv, AttrValue) { ASSERT_EQ(val->pad_list, (std::vector{1, 1, 1, 1})); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_deconv_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_deconv_parser_test.cc index 5344644a..73e1105f 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_deconv_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_deconv_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserDeConv : public TestTfliteParser { public: TestTfliteParserDeConv() = default; @@ -48,4 +48,4 @@ TEST_F(TestTfliteParserDeConv, AttrValue) { ASSERT_EQ(val->pad_list, (std::vector{1, 1, 1, 1})); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_depth_to_space_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_depth_to_space_parser_test.cc index 490617ca..71fdb7bb 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_depth_to_space_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_depth_to_space_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserDepthToSpace : public TestTfliteParser { public: 
TestTfliteParserDepthToSpace() = default; @@ -38,4 +38,4 @@ TEST_F(TestTfliteParserDepthToSpace, AttrValue) { ASSERT_EQ(val->block_size, 4); ASSERT_EQ(val->format, schema::Format_NHWC); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_depthwise_conv_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_depthwise_conv_parser_test.cc index db9d8eee..6b2d321e 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_depthwise_conv_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_depthwise_conv_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserDepthwiseConv1 : public TestTfliteParser { public: TestTfliteParserDepthwiseConv1() = default; @@ -72,4 +72,4 @@ TEST_F(TestTfliteParserDepthwiseConv2, AttrValue) { ASSERT_EQ(val->pad_list, (std::vector{1, 1, 1, 1})); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_fill_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_fill_parser_test.cc index 2b322e92..f1b268aa 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_fill_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_fill_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserFill : public TestTfliteParser { public: TestTfliteParserFill() = default; @@ -30,4 +30,4 @@ TEST_F(TestTfliteParserFill, OpType) { ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Fill) << "wrong Op Type"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_gather_nd_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_gather_nd_parser_test.cc index b70751d0..aa5b791e 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_gather_nd_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_gather_nd_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserGatherNd : public TestTfliteParser { public: TestTfliteParserGatherNd() = default; @@ -35,4 +35,4 @@ TEST_F(TestTfliteParserGatherNd, AttrValue) { ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsGatherNd(), nullptr); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_gather_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_gather_parser_test.cc index fb6f5d5a..1596123b 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_gather_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_gather_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserGather : 
public TestTfliteParser { public: TestTfliteParserGather() = default; @@ -30,4 +30,4 @@ TEST_F(TestTfliteParserGather, OpType) { ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Gather) << "wrong Op Type"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_l2norm_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_l2norm_parser_test.cc index 1cf02148..1e469265 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_l2norm_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_l2norm_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserL2Norm : public TestTfliteParser { public: TestTfliteParserL2Norm() = default; @@ -40,4 +40,4 @@ TEST_F(TestTfliteParserL2Norm, AttrValue) { ASSERT_EQ(val->axis, axis); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_logical_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_logical_parser_test.cc index 6c5cc051..9492420d 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_logical_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_logical_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteLogicalParserAnd : public TestTfliteParser { public: TestTfliteLogicalParserAnd() = default; @@ -57,4 +57,4 @@ TEST_F(TestTfliteParserLogicalOr, OpType) { ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_LogicalOr) << "wrong Op Type"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_lrn_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_lrn_parser_test.cc index ada5b598..035b245e 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_lrn_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_lrn_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserLRN : public TestTfliteParser { public: TestTfliteParserLRN() = default; @@ -40,4 +40,4 @@ TEST_F(TestTfliteParserLRN, AttrValue) { ASSERT_EQ(val->depth_radius, 5); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_one_hot_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_one_hot_parser_test.cc index 5670b174..9cf7e1fc 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_one_hot_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_one_hot_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserOneHot : public TestTfliteParser { public: TestTfliteParserOneHot() = default; @@ -37,4 
+37,4 @@ TEST_F(TestTfliteParserOneHot, AttrValue) { ASSERT_EQ(val->axis, 2); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_pad_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_pad_parser_test.cc index 880a2d84..a674c8a2 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_pad_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_pad_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserPad : public TestTfliteParser { public: TestTfliteParserPad() = default; @@ -35,4 +35,4 @@ TEST_F(TestTfliteParserPad, AttrValue) { ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsPadFusion(), nullptr); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.cc index e7002674..068f8666 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.cc @@ -18,7 +18,7 @@ #include #include "schema/inner/model_generated.h" -namespace mindspore { +namespace mindspore::lite { schema::MetaGraphT *TestTfliteParser::LoadAndConvert(const std::string &model_path, const std::string &weight_path) { return nullptr; @@ -26,4 +26,4 @@ schema::MetaGraphT *TestTfliteParser::LoadAndConvert(const std::string &model_pa void TestTfliteParser::TearDown() { free(meta_graph); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h index 3748007c..30b11be7 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h @@ -21,7 +21,7 @@ #include "common/common_test.h" #include "schema/inner/model_generated.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParser : public CommonTest { public: TestTfliteParser() = default; @@ -30,6 +30,6 @@ class TestTfliteParser : public CommonTest { schema::MetaGraphT *meta_graph = nullptr; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TEST_UT_TOOLS_CONVERTER_PARSER_TFLITE_TFLITE_PARSERS_TEST_UTILS_H_ diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc index b39e4937..dcec88a1 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserMaxPooling : public TestTfliteParser { public: TestTfliteParserMaxPooling() = default; @@ -68,4 +68,4 @@ TEST_F(TestTfliteParserAvgPooling, AttrValue) { ASSERT_EQ(val->pad_mode, schema::PadMode_SAME); ASSERT_EQ(val->round_mode, schema::RoundMode_FLOOR); } -} // 
namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reduce_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reduce_parser_test.cc index 0ec1554e..fdcb221c 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reduce_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reduce_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserReduceMax : public TestTfliteParser { public: TestTfliteParserReduceMax() = default; @@ -122,4 +122,4 @@ TEST_F(TestTfliteParserMean, AttrValue) { // reduceAny -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reshape_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reshape_parser_test.cc index c046b116..78d89162 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reshape_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reshape_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserReshape : public TestTfliteParser { public: TestTfliteParserReshape() = default; @@ -31,4 +31,4 @@ TEST_F(TestTfliteParserReshape, OpType) { ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Reshape) << "wrong Op Type"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_resize_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_resize_parser_test.cc index ffa2f1aa..ffd3bef6 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_resize_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_resize_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserResizeNN : public TestTfliteParser { public: @@ -65,4 +65,4 @@ TEST_F(TestTfliteParserResizeBilinear, AttrValue) { ASSERT_EQ(val->method, schema::ResizeMethod_LINEAR); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reverse_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reverse_parser_test.cc index 80b7cd2d..7b43bef5 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reverse_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reverse_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserReverse : public TestTfliteParser { public: TestTfliteParserReverse() = default; @@ -38,4 +38,4 @@ TEST_F(TestTfliteParserReverse, AttrValue) { ASSERT_EQ(val->axis, axis); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc 
b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc index 46f77c63..6dc5dd38 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserReverseSequence : public TestTfliteParser { public: TestTfliteParserReverseSequence() = default; @@ -37,4 +37,4 @@ TEST_F(TestTfliteParserReverseSequence, AttrValue) { auto val = meta_graph->nodes.front()->primitive->value.AsReverseSequence(); ASSERT_EQ(val->seq_dim, 1); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_slice_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_slice_parser_test.cc index 4a7c0308..3e5e8791 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_slice_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_slice_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserSlice : public TestTfliteParser { public: TestTfliteParserSlice() = default; @@ -36,4 +36,4 @@ TEST_F(TestTfliteParserSlice, AttrValue) { ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsSliceFusion(), nullptr); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_softmax_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_softmax_parser_test.cc index 368e09e8..aed116f7 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_softmax_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_softmax_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserSoftmax : public TestTfliteParser { public: TestTfliteParserSoftmax() = default; @@ -38,4 +38,4 @@ TEST_F(TestTfliteParserSoftmax, AttrValue) { ASSERT_EQ(val->axis[0], -1); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser_test.cc index 25feb24b..bb715630 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserSpaceToBatchND : public TestTfliteParser { public: TestTfliteParserSpaceToBatchND() = default; @@ -38,4 +38,4 @@ TEST_F(TestTfliteParserSpaceToBatchND, AttrValue) { std::vector blockshape = {2, 2}; ASSERT_EQ(val->block_shape, blockshape); } -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_space_to_depth_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_space_to_depth_parser_test.cc index ba6f9111..fee63a88 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_space_to_depth_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_space_to_depth_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserSpaceToDepth : public TestTfliteParser { public: TestTfliteParserSpaceToDepth() = default; @@ -38,4 +38,4 @@ TEST_F(TestTfliteParserSpaceToDepth, AttrValue) { ASSERT_EQ(val->block_size, 2); ASSERT_EQ(val->format, schema::Format_NHWC); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_sparse_to_dense_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_sparse_to_dense_parser_test.cc index 0c9fe1b4..eb5190e1 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_sparse_to_dense_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_sparse_to_dense_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserSparseToDense : public TestTfliteParser { public: TestTfliteParserSparseToDense() = default; @@ -31,4 +31,4 @@ TEST_F(TestTfliteParserSparseToDense, OpType) { ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_SparseToDense) << "wrong Op Type"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_split_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_split_parser_test.cc index d4575caa..3b5cae9b 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_split_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_split_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserSplit : public TestTfliteParser { public: TestTfliteParserSplit() = default; @@ -41,4 +41,4 @@ TEST_F(TestTfliteParserSplit, AttrValue) { ASSERT_EQ(val->size_splits, sizeSplits); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_split_v_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_split_v_parser_test.cc index 45147f0a..82cd250b 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_split_v_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_split_v_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserSplitV : public TestTfliteParser { public: TestTfliteParserSplitV() = default; @@ -41,4 +41,4 @@ TEST_F(TestTfliteParserSplitV, AttrValue) { ASSERT_EQ(val->size_splits, sizeSplits); } -} // namespace mindspore +} // namespace 
mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_stack_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_stack_parser_test.cc index 750c03e1..dd196ead 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_stack_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_stack_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserStack : public TestTfliteParser { public: TestTfliteParserStack() = default; @@ -38,4 +38,4 @@ TEST_F(TestTfliteParserStack, AttrValue) { ASSERT_EQ(val->axis, 1); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_strided_slice_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_strided_slice_parser_test.cc index adff9770..eb04e969 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_strided_slice_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_strided_slice_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserStridedSlice : public TestTfliteParser { public: TestTfliteParserStridedSlice() = default; @@ -38,4 +38,4 @@ TEST_F(TestTfliteParserStridedSlice, AttrValue) { ASSERT_EQ(val->end_mask, 0); ASSERT_EQ(val->begin_mask, 0); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_tile_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_tile_parser_test.cc index c32603d3..89e68633 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_tile_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_tile_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserTile : public TestTfliteParser { public: TestTfliteParserTile() = default; @@ -38,4 +38,4 @@ TEST_F(TestTfliteParserTile, AttrValue) { std::vector dims = {2, 3, 4}; ASSERT_EQ(val->dims, dims); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_topk_v2_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_topk_v2_parser_test.cc index a41db832..c6eea169 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_topk_v2_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_topk_v2_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserTopKV2 : public TestTfliteParser { public: TestTfliteParserTopKV2() = default; @@ -37,4 +37,4 @@ TEST_F(TestTfliteParserTopKV2, AttrValue) { auto val = meta_graph->nodes.front()->primitive->value.AsTopKFusion(); ASSERT_EQ(val->sorted, true); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_transpose_parser_test.cc 
b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_transpose_parser_test.cc index 4dde04af..4dc500b4 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_transpose_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_transpose_parser_test.cc @@ -17,7 +17,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserTranspose : public TestTfliteParser { public: TestTfliteParserTranspose() = default; @@ -32,4 +32,4 @@ TEST_F(TestTfliteParserTranspose, OpType) { ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Transpose) << "wrong Op Type"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_unique_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_unique_parser_test.cc index 444a2cd1..c3e5f103 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_unique_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_unique_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserUnique : public TestTfliteParser { public: TestTfliteParserUnique() = default; @@ -31,4 +31,4 @@ TEST_F(TestTfliteParserUnique, OpType) { ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Unique) << "wrong Op Type"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_unstack_parser_test.cc b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_unstack_parser_test.cc index 330d00d1..cd97d2c9 100644 --- a/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_unstack_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/parser/tflite/tflite_unstack_parser_test.cc @@ -18,7 +18,7 @@ #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h" #include "common/common_test.h" -namespace mindspore { +namespace mindspore::lite { class TestTfliteParserUnstack : public TestTfliteParser { public: TestTfliteParserUnstack() = default; @@ -37,4 +37,4 @@ TEST_F(TestTfliteParserUnstack, AttrValue) { auto val = meta_graph->nodes.front()->primitive->value.AsUnstack(); ASSERT_EQ(val->axis, 1); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/registry/model_parser_registry_test.cc b/mindspore-lite/test/ut/tools/converter/registry/model_parser_registry_test.cc index 9f57da02..0b197dc7 100644 --- a/mindspore-lite/test/ut/tools/converter/registry/model_parser_registry_test.cc +++ b/mindspore-lite/test/ut/tools/converter/registry/model_parser_registry_test.cc @@ -28,7 +28,7 @@ using mindspore::converter::ConverterParameters; using mindspore::converter::kFmkTypeCaffe; -namespace mindspore { +namespace mindspore::lite { namespace { FuncGraphPtr ConvertGraph(api::FuncGraphPtr func_graph) { auto impl = func_graph->impl(); @@ -74,4 +74,4 @@ TEST_F(ModelParserRegistryTest, TestRegistry) { ASSERT_EQ(is_return, true); delete model_parser; } -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/test/ut/tools/converter/registry/node_parser_registry_test.cc b/mindspore-lite/test/ut/tools/converter/registry/node_parser_registry_test.cc index d23e401b..0636f5a6 100644 --- a/mindspore-lite/test/ut/tools/converter/registry/node_parser_registry_test.cc +++ b/mindspore-lite/test/ut/tools/converter/registry/node_parser_registry_test.cc @@ -27,7 +27,7 @@ #include "mindspore/core/include/ir/graph_utils.h" using mindspore::converter::kFmkTypeTf; -namespace mindspore { +namespace mindspore::lite { namespace converter { class AddNodeParser : public NodeParser { public: @@ -81,4 +81,4 @@ TEST_F(NodeParserRegistryTest, TestRegistry) { auto prim = api::GetValueNode(cnode->input(0)); ASSERT_NE(prim, nullptr); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/registry/parser/model_parser_test.cc b/mindspore-lite/test/ut/tools/converter/registry/parser/model_parser_test.cc index f15f60ff..7746a025 100644 --- a/mindspore-lite/test/ut/tools/converter/registry/parser/model_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/registry/parser/model_parser_test.cc @@ -25,7 +25,7 @@ #include "mindapi/ir/tensor.h" #include "infer/return.h" -namespace mindspore { +namespace mindspore::lite { api::FuncGraphPtr ModelParserTest::Parse(const converter::ConverterParameters &flag) { // construct funcgraph res_graph_ = api::FuncGraph::Create(); @@ -173,4 +173,4 @@ converter::ModelParser *TestModelParserCreator() { } return model_parser; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/registry/parser/model_parser_test.h b/mindspore-lite/test/ut/tools/converter/registry/parser/model_parser_test.h index d7162984..5a5dffb6 100644 --- a/mindspore-lite/test/ut/tools/converter/registry/parser/model_parser_test.h +++ b/mindspore-lite/test/ut/tools/converter/registry/parser/model_parser_test.h @@ -25,7 +25,7 @@ #include "mindapi/ir/anf.h" #include "ut/tools/converter/registry/parser/node_parser_test.h" -namespace mindspore { +namespace mindspore::lite { class ModelParserTest : public converter::ModelParser { public: ModelParserTest() = default; @@ -42,6 +42,6 @@ class ModelParserTest : public converter::ModelParser { }; converter::ModelParser *TestModelParserCreator(); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TEST_UT_TOOLS_CONVERTER_REGISTRY_PARSER_MODEL_PARSER_TEST_H_ diff --git a/mindspore-lite/test/ut/tools/converter/registry/parser/node_parser_test.cc b/mindspore-lite/test/ut/tools/converter/registry/parser/node_parser_test.cc index bfc1e0c0..0d94a887 100644 --- a/mindspore-lite/test/ut/tools/converter/registry/parser/node_parser_test.cc +++ b/mindspore-lite/test/ut/tools/converter/registry/parser/node_parser_test.cc @@ -25,7 +25,7 @@ #include "infer/cxx_api/add_fusion.h" #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" -namespace mindspore { +namespace mindspore::lite { class AddNodeParserTest : public NodeParserTest { public: AddNodeParserTest() = default; @@ -87,4 +87,4 @@ REGISTER_NODE_PARSER_TEST(kAdd, std::make_shared()) REGISTER_NODE_PARSER_TEST(kSplit, std::make_shared()) REGISTER_NODE_PARSER_TEST(kConcat, std::make_shared()) REGISTER_NODE_PARSER_TEST(kProposal, std::make_shared()) -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/registry/parser/node_parser_test.h b/mindspore-lite/test/ut/tools/converter/registry/parser/node_parser_test.h index 
c102a100..6aa17607 100644 --- a/mindspore-lite/test/ut/tools/converter/registry/parser/node_parser_test.h +++ b/mindspore-lite/test/ut/tools/converter/registry/parser/node_parser_test.h @@ -24,7 +24,7 @@ #include "mindapi/base/shared_ptr.h" #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { using BaseOperatorPtr = api::SharedPtr; class NodeParserTest { public: @@ -68,5 +68,5 @@ class RegisterNodeParserTest { #define REGISTER_NODE_PARSER_TEST(name, node_parser) \ static RegisterNodeParserTest g_##name##_node_parser(name, node_parser); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TEST_UT_TOOLS_CONVERTER_REGISTRY_PARSER_NODE_PARSER_TEST_H_ diff --git a/mindspore-lite/test/ut/tools/converter/registry/pass_registry_position_ascend.cc b/mindspore-lite/test/ut/tools/converter/registry/pass_registry_position_ascend.cc index 8c5863fc..909da4e7 100644 --- a/mindspore-lite/test/ut/tools/converter/registry/pass_registry_position_ascend.cc +++ b/mindspore-lite/test/ut/tools/converter/registry/pass_registry_position_ascend.cc @@ -24,7 +24,7 @@ #include "src/common/log_adapter.h" using mindspore::registry::POSITION_ASCEND; -namespace mindspore { +namespace mindspore::lite { class PassRegistryPositionAscendTest : public mindspore::CommonTest { public: PassRegistryPositionAscendTest() = default; @@ -55,4 +55,4 @@ TEST_F(PassRegistryPositionAscendTest, RunPassAtPositionAscend) { ASSERT_EQ(ret, true); MS_LOG(INFO) << "PASS"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/converter/registry/pass_registry_test.cc b/mindspore-lite/test/ut/tools/converter/registry/pass_registry_test.cc index 10aa1d1a..6d6a31bc 100644 --- a/mindspore-lite/test/ut/tools/converter/registry/pass_registry_test.cc +++ b/mindspore-lite/test/ut/tools/converter/registry/pass_registry_test.cc @@ -40,7 +40,7 @@ using mindspore::converter::ConverterParameters; using mindspore::converter::kFmkTypeCaffe; using mindspore::registry::POSITION_BEGIN; -namespace mindspore { +namespace mindspore::lite { namespace { FuncGraphPtr ConvertGraph(api::FuncGraphPtr func_graph) { auto impl = func_graph->impl(); @@ -245,4 +245,4 @@ TEST_F(PassRegistryTest, TestRegistry) { bool is_return = opt::CheckPrimitiveType(cnode_list.back(), prim::kPrimReturn); ASSERT_EQ(is_return, true); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/activation_fusion_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/activation_fusion_test.cc index f0ffb008..59c7bcb5 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/activation_fusion_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/activation_fusion_test.cc @@ -24,7 +24,7 @@ #include "tools/lite_exporter/anf_exporter.h" #include "test/common/import_from_meta_graphT.h" -namespace mindspore { +namespace mindspore::lite { class ActivationFusionTest : public mindspore::CommonTest { public: ActivationFusionTest() = default; @@ -148,4 +148,4 @@ TEST_F(ActivationFusionTest, TestBadCase_ReluSigmoid) { ASSERT_EQ(first_node->primitive->value.AsActivation()->activation_type, schema::ActivationType_RELU); ASSERT_EQ(second_node->primitive->value.AsActivation()->activation_type, schema::ActivationType_SIGMOID); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/add_concat_act_fusion_test.cc 
b/mindspore-lite/test/ut/tools/optimizer/fusion/add_concat_act_fusion_test.cc index ba395578..d98caaff 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/add_concat_act_fusion_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/add_concat_act_fusion_test.cc @@ -25,7 +25,7 @@ #include "tools/optimizer/common/gllo_utils.h" #include "test/common/import_from_meta_graphT.h" -namespace mindspore { +namespace mindspore::lite { constexpr size_t kAddInputTensorWSize = 128; constexpr size_t kConcatInputTensorWDims = 256; constexpr size_t kGraphNodeSize = 3; @@ -166,4 +166,4 @@ TEST_F(AddConcatActivationFusionTest, TestAddConcatReluNode) { } } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc index 41f62eb7..b0d04c39 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc @@ -25,7 +25,7 @@ #include "tools/lite_exporter/anf_exporter.h" #include "test/common/import_from_meta_graphT.h" -namespace mindspore { +namespace mindspore::lite { class ConstantFoldingFusionTest : public mindspore::CommonTest { public: ConstantFoldingFusionTest() = default; @@ -590,4 +590,4 @@ TEST_F(ConstantFoldingFusionTest, TestSplitConstantFold) { auto new_meta_graph = lite::Export(new_graph); ASSERT_EQ(new_meta_graph->nodes.size(), 0); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc index cd6c5b84..dab6e7e4 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc @@ -24,7 +24,7 @@ #include "tools/lite_exporter/anf_exporter.h" #include "test/common/import_from_meta_graphT.h" -namespace mindspore { +namespace mindspore::lite { class ConvActivationFusionTest : public mindspore::CommonTest { public: ConvActivationFusionTest() = default; @@ -171,4 +171,4 @@ TEST_F(ConvActivationFusionTest, TestBadCase_ConvRelu) { } } } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc index 4b3a5a30..6e249bae 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc @@ -24,7 +24,7 @@ #include "tools/lite_exporter/anf_exporter.h" #include "test/common/import_from_meta_graphT.h" -namespace mindspore { +namespace mindspore::lite { class ConvBiasAddFusionTest : public mindspore::CommonTest { public: ConvBiasAddFusionTest() = default; @@ -170,4 +170,4 @@ TEST_F(ConvBiasAddFusionTest, TestBadCase_ConvAdd) { auto new_meta_graph = lite::Export(func_graph); ASSERT_EQ(new_meta_graph->nodes.size(), 2); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc index fec41e88..800a4a9b 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc +++ 
b/mindspore-lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc @@ -24,7 +24,7 @@ #include "tools/lite_exporter/anf_exporter.h" #include "test/common/import_from_meta_graphT.h" -namespace mindspore { +namespace mindspore::lite { class ConvBNFusionTest : public mindspore::CommonTest { public: ConvBNFusionTest() = default; @@ -276,4 +276,4 @@ TEST_F(ConvBNFusionTest, TestDeptiwiseConvAddNode) { auto new_meta_graph = lite::Export(func_graph); ASSERT_EQ(new_meta_graph->nodes.size(), 1); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc index af346fb1..04786ae1 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc @@ -24,7 +24,7 @@ #include "tools/lite_exporter/anf_exporter.h" #include "test/common/import_from_meta_graphT.h" -namespace mindspore { +namespace mindspore::lite { class ConvScaleFusionTest : public mindspore::CommonTest { public: ConvScaleFusionTest() = default; @@ -206,4 +206,4 @@ TEST_F(ConvScaleFusionTest, TestDeptiwiseConvScaleNode) { } delete anf_transform; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/activation_fusion_inout_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/activation_fusion_inout_test.cc index de4ee5bd..30420fe5 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/activation_fusion_inout_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/activation_fusion_inout_test.cc @@ -21,7 +21,7 @@ #include "nnacl_c/op_base.h" #include "infer/cxx_api/activation.h" -namespace mindspore { +namespace mindspore::lite { namespace { inline const int kActMinVal = -20; inline const int kActMaxVal = 6; @@ -81,4 +81,4 @@ class ActivationFusionInoutTest : public FusionInoutTest { }; TEST_F(ActivationFusionInoutTest, test) { ASSERT_EQ(DoTest(), true); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/add_concat_act_fusion_inout_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/add_concat_act_fusion_inout_test.cc index 5f304d49..99bd0227 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/add_concat_act_fusion_inout_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/add_concat_act_fusion_inout_test.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" #include "infer/cxx_api/add_fusion.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr size_t kAddInputTensorWSize = 128; } // namespace @@ -124,4 +124,4 @@ class ConcatActFusionInoutTest : public FusionInoutTest { }; TEST_F(ConcatActFusionInoutTest, test) { ASSERT_EQ(DoTest(), true); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_act_fusion_inout_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_act_fusion_inout_test.cc index c53b7eaf..b04b521e 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_act_fusion_inout_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_act_fusion_inout_test.cc @@ -21,7 
+21,7 @@ #include "nnacl_c/op_base.h" #include "infer/cxx_api/activation.h" -namespace mindspore { +namespace mindspore::lite { class ConvActFusionInoutTest : public ConvFusionInoutTest { public: ConvActFusionInoutTest() = default; @@ -72,4 +72,4 @@ class ConvActFusionInoutTest : public ConvFusionInoutTest { }; TEST_F(ConvActFusionInoutTest, test) { ASSERT_EQ(DoTest(), true); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_bias_fusion_inout_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_bias_fusion_inout_test.cc index be04c79d..ab5ba104 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_bias_fusion_inout_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_bias_fusion_inout_test.cc @@ -21,7 +21,7 @@ #include "nnacl_c/op_base.h" #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" -namespace mindspore { +namespace mindspore::lite { class ConvBiasFusionInoutTest : public ConvFusionInoutTest { public: ConvBiasFusionInoutTest() = default; @@ -72,4 +72,4 @@ class ConvBiasFusionInoutTest : public ConvFusionInoutTest { }; TEST_F(ConvBiasFusionInoutTest, test) { ASSERT_EQ(DoTest(), true); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_fusion_inout_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_fusion_inout_test.cc index ac5766cf..822ec797 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_fusion_inout_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_fusion_inout_test.cc @@ -22,7 +22,7 @@ #include "infer/cxx_api/conv2d_fusion.h" #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { ValueNodePtr ConvFusionInoutTest::CreateConvPrimitiveValue() { auto prim = std::make_unique(); MS_CHECK_TRUE_MSG(prim != nullptr, nullptr, "create Conv2d primitivec failed"); @@ -44,4 +44,4 @@ CNodePtr ConvFusionInoutTest::AddConv(const FuncGraphPtr &graph, const AnfNodePt conv->set_fullname_with_scope(name); return conv; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_fusion_inout_test.h b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_fusion_inout_test.h index dc2bc785..3cbe235c 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_fusion_inout_test.h +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/conv_fusion_inout_test.h @@ -24,7 +24,7 @@ #include "include/backend/optimizer/optimizer.h" #include "include/backend/optimizer/pass_manager.h" -namespace mindspore { +namespace mindspore::lite { class ConvFusionInoutTest : public FusionInoutTest { public: ConvFusionInoutTest() = default; @@ -42,5 +42,5 @@ class ConvFusionInoutTest : public FusionInoutTest { static const int ih_ = 16; static const int iw_ = 16; }; -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/fusion_inout_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/fusion_inout_test.cc index e3fb942e..c002e155 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/fusion_inout_test.cc +++ 
b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/fusion_inout_test.cc @@ -28,7 +28,7 @@ #include "tools/optimizer/common/gllo_utils.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore { +namespace mindspore::lite { FuncGraphPtr FusionInoutTest::Fuse() { if (graph_ == nullptr) { MS_LOG(WARNING) << "Graph not inited"; @@ -169,4 +169,4 @@ bool FusionInoutTest::DoTest() { auto new_outputs_num = GetOutputNumber(); return old_inputs == new_inputs && old_outputs_num == new_outputs_num; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/fusion_inout_test.h b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/fusion_inout_test.h index 42d3f351..bdae8bde 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/fusion_inout_test.h +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/fusion_inout_test.h @@ -26,7 +26,7 @@ #include "include/backend/optimizer/pass_manager.h" #include "src/common/log_util.h" -namespace mindspore { +namespace mindspore::lite { class FusionInoutTest : public mindspore::CommonTest { public: FusionInoutTest() = default; @@ -53,5 +53,5 @@ class FusionInoutTest : public mindspore::CommonTest { opt::PassPtr pass_ = nullptr; FuncGraphPtr graph_ = nullptr; }; -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_act_fusion_inout_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_act_fusion_inout_test.cc index e6ea162d..8b3e1ebd 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_act_fusion_inout_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_act_fusion_inout_test.cc @@ -22,7 +22,7 @@ #include "infer/cxx_api/mat_mul_fusion.h" #include "infer/cxx_api/activation.h" -namespace mindspore { +namespace mindspore::lite { class MatMulActivationFusionInoutTest : public FusionInoutTest { public: MatMulActivationFusionInoutTest() = default; @@ -90,4 +90,4 @@ class MatMulActivationFusionInoutTest : public FusionInoutTest { }; TEST_F(MatMulActivationFusionInoutTest, test) { ASSERT_EQ(DoTest(), true); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_fusion_inout_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_fusion_inout_test.cc index fefb608d..055cf890 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_fusion_inout_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_fusion_inout_test.cc @@ -22,7 +22,7 @@ #include "infer/cxx_api/mat_mul_fusion.h" #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { CNodePtr MatMulFusionInoutTest::AddMatMul(const FuncGraphPtr &graph, const AnfNodePtr &input1, const AnfNodePtr &input2, const ActivationType &act_type, const std::string &name) { auto prim = std::make_unique(); @@ -36,4 +36,4 @@ CNodePtr MatMulFusionInoutTest::AddMatMul(const FuncGraphPtr &graph, const AnfNo matmul->set_fullname_with_scope(name); return matmul; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_fusion_inout_test.h 
b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_fusion_inout_test.h index 57865288..cda33e78 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_fusion_inout_test.h +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_fusion_inout_test.h @@ -26,7 +26,7 @@ #include "include/backend/optimizer/pass_manager.h" #include "infer/cxx_api/activation.h" -namespace mindspore { +namespace mindspore::lite { class MatMulFusionInoutTest : public FusionInoutTest { public: MatMulFusionInoutTest() = default; @@ -35,5 +35,5 @@ class MatMulFusionInoutTest : public FusionInoutTest { CNodePtr AddMatMul(const FuncGraphPtr &graph, const AnfNodePtr &input1, const AnfNodePtr &input2, const ActivationType &act_type, const std::string &name); }; -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_mul_fusion_inout_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_mul_fusion_inout_test.cc index 5e6ac368..168dab24 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_mul_fusion_inout_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/matmul_mul_fusion_inout_test.cc @@ -22,7 +22,7 @@ #include "infer/cxx_api/mul_fusion.h" #include "infer/cxx_api/mat_mul_fusion.h" -namespace mindspore { +namespace mindspore::lite { class MatmulMulFusionInoutTest : public FusionInoutTest { public: MatmulMulFusionInoutTest() = default; @@ -90,4 +90,4 @@ class MatmulMulFusionInoutTest : public FusionInoutTest { }; TEST_F(MatmulMulFusionInoutTest, test) { ASSERT_EQ(DoTest(), true); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/trans_matmul_fusion_inout_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/trans_matmul_fusion_inout_test.cc index 1d40ce2c..60e53fe1 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/trans_matmul_fusion_inout_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/fusion_inout_test/trans_matmul_fusion_inout_test.cc @@ -22,7 +22,7 @@ #include "nnacl_c/op_base.h" #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" -namespace mindspore { +namespace mindspore::lite { namespace { inline const int kHeight = 5; inline const int kChannel = 3; @@ -88,4 +88,4 @@ class TransMatMulFusionInoutTest : public MatMulFusionInoutTest { }; TEST_F(TransMatMulFusionInoutTest, test) { ASSERT_EQ(DoTest(), true); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/test/ut/tools/optimizer/fusion/matmul_mul_fusion_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/matmul_mul_fusion_test.cc index ec6818d8..3172baab 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/matmul_mul_fusion_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/matmul_mul_fusion_test.cc @@ -25,7 +25,7 @@ #include "test/common/import_from_meta_graphT.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr int kMatMulInputDimsM = 128; constexpr int kMatMulInputDimsK = 225; @@ -141,4 +141,4 @@ TEST_F(MatMulAddFusionTest, TestMatMulMulNode) { ASSERT_EQ(new_meta_graph->nodes.size(), 1); MS_LOG(INFO) << "Passed"; } -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/test/ut/tools/optimizer/fusion/trans_matmul_fusion_test.cc b/mindspore-lite/test/ut/tools/optimizer/fusion/trans_matmul_fusion_test.cc index be87aa06..a1d54ee6 100644 --- a/mindspore-lite/test/ut/tools/optimizer/fusion/trans_matmul_fusion_test.cc +++ b/mindspore-lite/test/ut/tools/optimizer/fusion/trans_matmul_fusion_test.cc @@ -25,7 +25,7 @@ #include "tools/lite_exporter/anf_exporter.h" #include "test/common/import_from_meta_graphT.h" -namespace mindspore { +namespace mindspore::lite { class TransMatMulFusionTest : public mindspore::CommonTest { public: TransMatMulFusionTest() = default; @@ -209,4 +209,4 @@ TEST_F(TransMatMulFusionTest, TestBadCase_TransMatMul) { ASSERT_EQ(cnode->primitive->value.AsMatMulFusion()->transpose_a, false); ASSERT_EQ(cnode->primitive->value.AsMatMulFusion()->transpose_b, false); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/benchmark/benchmark_c_api.cc b/mindspore-lite/tools/benchmark/benchmark_c_api.cc index d66c65f6..065f93aa 100644 --- a/mindspore-lite/tools/benchmark/benchmark_c_api.cc +++ b/mindspore-lite/tools/benchmark/benchmark_c_api.cc @@ -24,7 +24,7 @@ using mindspore::lite::kFloatMSEC; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace tools { int BenchmarkCApi::LoadInput() { if (flags_->in_data_file_.empty()) { @@ -416,7 +416,7 @@ int BenchmarkCApi::InitDumpTensorDataCallbackParameter() { return RET_ERROR; } } // namespace tools -} // namespace mindspore +} // namespace mindspore::lite uint64_t g_op_begin_ = 0; int g_op_call_times_total_ = 0; diff --git a/mindspore-lite/tools/benchmark/benchmark_c_api.h b/mindspore-lite/tools/benchmark/benchmark_c_api.h index 4ba9649a..2a9073bd 100644 --- a/mindspore-lite/tools/benchmark/benchmark_c_api.h +++ b/mindspore-lite/tools/benchmark/benchmark_c_api.h @@ -36,7 +36,7 @@ bool TimeAfterCallback(const MSTensorHandleArray inputs, const MSTensorHandleArr using mindspore::lite::BenchmarkBase; using mindspore::lite::BenchmarkFlags; -namespace mindspore::tools { +namespace mindspore::lite::tools { class MS_API BenchmarkCApi : public BenchmarkBase { public: explicit BenchmarkCApi(BenchmarkFlags *flags) : BenchmarkBase(flags) {} @@ -74,5 +74,5 @@ class MS_API BenchmarkCApi : public BenchmarkBase { MSKernelCallBackC before_call_back_ = nullptr; MSKernelCallBackC after_call_back_ = nullptr; }; -} // namespace mindspore::tools +} // namespace mindspore::lite::tools #endif // MINDSPORE_LITE_TOOLS_BENCHMARK_BENCHMARK_C_API_H_ diff --git a/mindspore-lite/tools/common/custom_ascend_utils.cc b/mindspore-lite/tools/common/custom_ascend_utils.cc index a3b46694..ac970f7c 100644 --- a/mindspore-lite/tools/common/custom_ascend_utils.cc +++ b/mindspore-lite/tools/common/custom_ascend_utils.cc @@ -24,7 +24,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr auto kCustomPrimTypeACL = "ACL"; constexpr auto kCustomNodeName = "custom_0"; @@ -536,4 +536,4 @@ bool CustomAscendUtils::ParseCustomFuncGraph(const FuncGraphPtr &func_graph, ten *graph_name = input_last->fullname_with_scope(); return true; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/common/custom_ascend_utils.h b/mindspore-lite/tools/common/custom_ascend_utils.h index bec6d2db..d0581492 100644 --- a/mindspore-lite/tools/common/custom_ascend_utils.h 
+++ b/mindspore-lite/tools/common/custom_ascend_utils.h @@ -34,7 +34,7 @@ #include "mindspore/ops/infer/custom.h" #include "src/common/common.h" -namespace mindspore { +namespace mindspore::lite { struct DynKVCacheSaveInfo { bool batch_size_dyn = false; bool seq_length_dyn = false; @@ -102,5 +102,5 @@ class MS_API CustomAscendUtils { static bool GetZeroValueRefDatas(const ops::PrimitiveCPtr &primc, std::vector> *ref_infos); }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_EXTENDRT_UTILS_CUSTOM_ASCEND_UTILS_H_ diff --git a/mindspore-lite/tools/common/func_graph_utils.cc b/mindspore-lite/tools/common/func_graph_utils.cc index 73f7ec2a..14d49a48 100644 --- a/mindspore-lite/tools/common/func_graph_utils.cc +++ b/mindspore-lite/tools/common/func_graph_utils.cc @@ -19,7 +19,7 @@ #include #include "tools/common/graph_util.h" #include "tools/converter/converter_context.h" -namespace mindspore { +namespace mindspore::lite { AbstractBasePtr FuncGraphUtils::GetAbstractFromNode(const std::pair &node) { auto anfnode = node.first; MS_EXCEPTION_IF_NULL(anfnode); @@ -134,4 +134,4 @@ tensor::TensorPtr FuncGraphUtils::GetParameterConstValue(const AnfNodePtr &anf_n } return tensor; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/common/func_graph_utils.h b/mindspore-lite/tools/common/func_graph_utils.h index df2b3185..9175dfa5 100644 --- a/mindspore-lite/tools/common/func_graph_utils.h +++ b/mindspore-lite/tools/common/func_graph_utils.h @@ -24,7 +24,7 @@ #include #include "ir/func_graph.h" -namespace mindspore { +namespace mindspore::lite { class FuncGraphUtils { public: static std::vector GetFuncGraphOutputNames(const FuncGraphPtr &func_graph); @@ -34,6 +34,6 @@ class FuncGraphUtils { static std::string GetOutputName(const std::pair &node_index); static tensor::TensorPtr GetParameterConstValue(const AnfNodePtr &anf_node); }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOL_COMMON_FUNC_GRAPH_UTILS_H_ diff --git a/mindspore-lite/tools/common/opengl_util.cc b/mindspore-lite/tools/common/opengl_util.cc index dfa7537c..c47256f1 100644 --- a/mindspore-lite/tools/common/opengl_util.cc +++ b/mindspore-lite/tools/common/opengl_util.cc @@ -18,7 +18,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { namespace OpenGL { #if defined(GPU_OPENCL) && defined(__ANDROID__) && defined(ENABLE_ARM64) const char *g_glsl_host_to_device_2d = @@ -494,4 +494,4 @@ GLuint OpenGLRuntime::CopyHostToDeviceTexture(void *hostData, int width, int hei void OpenGLRuntime::PrintImage2DData(float *data, int w, int h, int c) {} #endif } // namespace OpenGL -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/common/opengl_util.h b/mindspore-lite/tools/common/opengl_util.h index 9d13fae8..49cd107a 100644 --- a/mindspore-lite/tools/common/opengl_util.h +++ b/mindspore-lite/tools/common/opengl_util.h @@ -61,7 +61,7 @@ inline EGLContext eglGetCurrentDisplay(void) { return nullptr; } #define OPENGL_CHECK_ERROR #endif -namespace mindspore { +namespace mindspore::lite { namespace OpenGL { #define BIND_INDEX_0 0 #define BIND_INDEX_1 1 @@ -100,6 +100,6 @@ class OpenGLRuntime { EGLSurface m_surface_ = nullptr; }; } // namespace OpenGL -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_COMMON_OPENGL_UTIL_H_ diff --git a/mindspore-lite/tools/converter/adapter/acl/acl_pass.cc b/mindspore-lite/tools/converter/adapter/acl/acl_pass.cc 
index abb42f94..cf8efb67 100644 --- a/mindspore-lite/tools/converter/adapter/acl/acl_pass.cc +++ b/mindspore-lite/tools/converter/adapter/acl/acl_pass.cc @@ -20,7 +20,7 @@ #include "tools/converter/adapter/acl/src/acl_memory_offload_pass_impl.h" #endif -namespace mindspore { +namespace mindspore::lite { namespace opt { AclPass::AclPass(const std::shared_ptr ¶m) : Pass("ACL") { #ifdef ENABLE_LITE_ACL @@ -49,4 +49,4 @@ bool AclPass::Run(const FuncGraphPtr &func_graph) { #endif } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/acl/acl_pass.h b/mindspore-lite/tools/converter/adapter/acl/acl_pass.h index 24cb5da5..b7db321f 100644 --- a/mindspore-lite/tools/converter/adapter/acl/acl_pass.h +++ b/mindspore-lite/tools/converter/adapter/acl/acl_pass.h @@ -22,7 +22,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/converter/cxx_api/converter_para.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class AclPassImpl; using AclPassImplPtr = std::shared_ptr; @@ -38,5 +38,5 @@ class AclPass : public Pass { AclPassImplPtr impl_ = nullptr; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_ACL_ACL_PASS_H_ diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/acl_utils.h b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/acl_utils.h index 34cd7b32..31392d39 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/acl_utils.h +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/acl_utils.h @@ -22,7 +22,7 @@ #include "plugin/ascend/res_manager/symbol_interface/acl_base_symbol.h" #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" -namespace mindspore { +namespace mindspore::lite { static inline bool IsAscend910Soc() { const char *soc_name_c = CALL_ASCEND_API(aclrtGetSocName); if (soc_name_c == nullptr) { @@ -56,6 +56,6 @@ static inline std::string TransforPrecisionToAcl(const std::string &precision_mo {"preferred_optimal", "allow_mix_precision"}}; return precision_map.find(precision_mode) != precision_map.end() ? 
precision_map.at(precision_mode) : precision_mode; } -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_CXX_API_ACL_UTILS_H diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/any_utils.cc b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/any_utils.cc index bf44c7a9..ef0a80bd 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/any_utils.cc +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/any_utils.cc @@ -16,7 +16,7 @@ #include "cxx_api/any_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { template >> static U GetValue(const std::map &any_map, const std::string &key) { @@ -94,4 +94,4 @@ std::map> GetAnyValueInputShape(const std::map>>(any_map, name); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/any_utils.h b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/any_utils.h index 9ec0bf80..c36ed8e8 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/any_utils.h +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/any_utils.h @@ -24,7 +24,7 @@ #include "include/api/visible.h" #include "include/api/data_type.h" -namespace mindspore { +namespace mindspore::lite { // std::any is not support to access across shared libraries, so add an adapter to access std::any MS_API void SetAnyValue(std::any *any, bool value); MS_API void SetAnyValue(std::any *any, int value); @@ -40,5 +40,5 @@ MS_API DataType GetAnyValueDataType(const std::map &any_m MS_API std::string GetAnyValueStr(const std::map &any_map, const std::string &name); MS_API std::map> GetAnyValueInputShape(const std::map &any_map, const std::string &name); -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_CXX_API_ANY_UTILS_H_ diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/context.cc b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/context.cc index 7bdb4a39..40036b67 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/context.cc +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/context.cc @@ -39,7 +39,7 @@ constexpr auto KModelOptionAscend310FusionSwitchCfgPath = "mindspore.option.asce constexpr auto kModelOptionAscend310DynamicBatchSize = "mindspore.option.ascend310.dynamic_batch_size"; constexpr auto kModelOptionAscend310BufferOptimize = "mindspore.option.ascend310.buffer_optimize"; -namespace mindspore { +namespace mindspore::lite { class Allocator {}; struct Context::Data { @@ -308,4 +308,4 @@ std::vector AscendDeviceInfo::GetBufferOptimizeModeChar() const { const std::string &ref = GetAnyValueStr(data_->params, kModelOptionAscend310BufferOptimize); return StringToChar(ref); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/dlutils.h b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/dlutils.h index d4cb5617..c83ac93c 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/dlutils.h +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/dlutils.h @@ -24,7 +24,7 @@ #include "include/api/status.h" #include "utils/file_utils.h" -namespace mindspore { +namespace mindspore::lite { inline Status DLSoPath(std::string *so_path) { if (so_path == nullptr) { return Status(kMEFailed, "Input so_path can not be 
nullptr."); @@ -102,6 +102,6 @@ inline void DLSoClose(void *handle) { } \ } while (false) -} // namespace mindspore +} // namespace mindspore::lite #endif #endif // MINDSPORE_CCSRC_CXX_API_DLUTILS_H_ diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/acl/acl_convert_init_adapter.cc b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/acl/acl_convert_init_adapter.cc index d46c8de9..ec3f9b6d 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/acl/acl_convert_init_adapter.cc +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/acl/acl_convert_init_adapter.cc @@ -21,7 +21,7 @@ #include "plugin/ascend/res_manager/symbol_interface/acl_symbol.h" #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" -namespace mindspore { +namespace mindspore::lite { AclConvertInitAdapter &AclConvertInitAdapter::GetInstance() { static AclConvertInitAdapter instance = {}; return instance; @@ -65,6 +65,7 @@ ge::graphStatus AclConvertInitAdapter::AclBuildInit(const std::map lock(build_flag_mutex_); @@ -74,3 +75,6 @@ void AclConvertInitAdapter::AclBuildFinalize() { } } } // namespace mindspore +======= +} // namespace mindspore::lite +>>>>>>> add nameapce lite diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/acl/acl_convert_init_adapter.h b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/acl/acl_convert_init_adapter.h index 81ccfbca..ecbf0266 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/acl/acl_convert_init_adapter.h +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/acl/acl_convert_init_adapter.h @@ -24,7 +24,7 @@ #include "ge/ge_ir_build.h" #include "include/api/visible.h" -namespace mindspore { +namespace mindspore::lite { class MS_API AclConvertInitAdapter { public: static AclConvertInitAdapter &GetInstance(); @@ -43,5 +43,5 @@ class MS_API AclConvertInitAdapter { std::mutex flag_mutex_; std::mutex build_flag_mutex_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_ACL_ACL_ACL_CONVERT_INIT_ADAPTER_H diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/graph_data.cc b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/graph_data.cc index 2781f9ec..dd83793e 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/graph_data.cc +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/graph_data.cc @@ -18,7 +18,7 @@ #include "utils/ms_context.h" #include "runtime/hardware_abstract/device_context/device_context_manager.h" -namespace mindspore { +namespace mindspore::lite { Graph::GraphData::GraphData(const FuncGraphPtr &func_graph, enum ModelType model_type) : func_graph_(nullptr), om_data_(), model_type_(ModelType::kUnknownType), data_graph_({}) { if (model_type != ModelType::kMindIR) { @@ -58,4 +58,4 @@ Buffer Graph::GraphData::GetOMData() const { void Graph::GraphData::SetPreprocess(const std::vector> &data_graph) { data_graph_ = data_graph; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/graph_data.h b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/graph_data.h index 168eeb53..ec38f0e3 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/graph_data.h +++ 
b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/graph_data.h @@ -25,7 +25,7 @@ #include "include/dataset/execute.h" #include "ir/func_graph.h" -namespace mindspore { +namespace mindspore::lite { class Graph::GraphData { public: GraphData(); @@ -52,5 +52,5 @@ class Graph::GraphData { enum ModelType model_type_; std::vector> data_graph_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_CXX_API_GRAPH_GRAPH_DATA_H diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/acl_model_options.cc b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/acl_model_options.cc index ca0ff14d..09d0f8e9 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/acl_model_options.cc +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/acl_model_options.cc @@ -22,7 +22,7 @@ #include "plugin/ascend/res_manager/symbol_interface/acl_base_symbol.h" #include "plugin/ascend/res_manager/symbol_interface/symbol_utils.h" -namespace mindspore { +namespace mindspore::lite { static const std::map kSupportedDtypeOptionMap = {{DataType::kNumberTypeFloat16, "FP16"}, {DataType::kNumberTypeFloat32, "FP32"}, {DataType::kNumberTypeUInt8, "UINT8"}}; @@ -223,4 +223,4 @@ std::string AclModelOptions::GenAclOptionsKey() const { } return key_str; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/acl_model_options.h b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/acl_model_options.h index b80fc24d..6cf65732 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/acl_model_options.h +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/acl_model_options.h @@ -26,7 +26,7 @@ #include "include/api/status.h" #include "include/api/context.h" -namespace mindspore { +namespace mindspore::lite { class MS_API AclModelOptions { public: explicit AclModelOptions(const std::shared_ptr &context); @@ -90,6 +90,6 @@ class MS_API AclModelOptions { std::vector const_names_; bool is_last_model_ = false; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_CXXAPI_SESSION_ACL_OPTION_PARSER_H diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/model_converter.cc b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/model_converter.cc index 235bea7a..1f8dbc4e 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/model_converter.cc +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/model_converter.cc @@ -29,7 +29,7 @@ #include "src/common/file_utils.h" #include "cxx_api/graph/acl/acl_convert_init_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace { // some config is not supported in the update subgraph, do not add to the update_options. e.g. lora weight update. 
const std::set update_options_blacklist = { @@ -249,4 +249,4 @@ Buffer ModelConverter::LoadAscendIRInner(const Buffer &model_data) { #endif return BuildAirModel(df_graph, init_options, build_options); } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/model_converter.h b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/model_converter.h index d64117a4..52e509aa 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/model_converter.h +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/acl/model_converter.h @@ -26,7 +26,7 @@ #include "ge/ge_ir_build.h" #include "cxx_api/model/acl/acl_model_options.h" -namespace mindspore { +namespace mindspore::lite { class MS_API ModelConverter { public: ModelConverter() : options_() {} @@ -46,5 +46,5 @@ class MS_API ModelConverter { std::weak_ptr options_; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_CXXAPI_SESSION_ACL_MODEL_CONVERTER_H diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/aoe/auto_tune_process.cc b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/aoe/auto_tune_process.cc index 316f81cf..772e6fb4 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/aoe/auto_tune_process.cc +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/model/aoe/auto_tune_process.cc @@ -23,7 +23,7 @@ #include #include "src/common/file_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr int kBuffSize = 1024; const std::map kTuneModeMap = {{"1", "subgraph tuning"}, {"2", "operator tuning"}}; @@ -155,4 +155,4 @@ Status AutoTuneProcess::AoeOfflineTurningGraph(const std::weak_ptr &options, const backend::ge_backend::DfGraphPtr &graph); }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_CCSRC_CXXAPI_MODEL_AOE_AUTO_TUNE_PROCESS_H diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/serialization.cc b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/serialization.cc index 55a3dc26..b6d73da2 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/serialization.cc +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/serialization.cc @@ -25,7 +25,7 @@ #endif #include "utils/crypto.h" -namespace mindspore { +namespace mindspore::lite { static Status RealPath(const std::string &file, std::string *realpath_str) { MS_EXCEPTION_IF_NULL(realpath_str); char real_path_mem[PATH_MAX] = {0}; @@ -350,4 +350,4 @@ Status Serialization::ExportWeightsCollaborateWithMicro(const Model &, ModelType MS_LOG(ERROR) << "Unsupported feature."; return kMEFailed; } -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/acl/infer/flash_attention_infer.cc b/mindspore-lite/tools/converter/adapter/acl/infer/flash_attention_infer.cc index 3b738d7f..3fa45543 100644 --- a/mindspore-lite/tools/converter/adapter/acl/infer/flash_attention_infer.cc +++ b/mindspore-lite/tools/converter/adapter/acl/infer/flash_attention_infer.cc @@ -21,7 +21,7 @@ #include "tools/converter/adapter/acl/common/acl_types.h" #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { namespace { constexpr int kNumInputSize = 3; @@ -63,9 +63,9 @@ Status FlashAttentionInfer::Infer(std::vector *inputs, 
std: return kSuccess; } } // namespace kernel -} // namespace mindspore -namespace mindspore { +} // namespace mindspore::lite +namespace mindspore::lite { namespace kernel { REGISTER_CUSTOM_KERNEL_INTERFACE(ACL, FlashAttention, FlashAttentionInferCreater); } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/acl/infer/flash_attention_infer.h b/mindspore-lite/tools/converter/adapter/acl/infer/flash_attention_infer.h index bac9f669..4e5140f0 100644 --- a/mindspore-lite/tools/converter/adapter/acl/infer/flash_attention_infer.h +++ b/mindspore-lite/tools/converter/adapter/acl/infer/flash_attention_infer.h @@ -21,7 +21,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class FlashAttentionInfer : public mindspore::kernel::KernelInterface { public: @@ -33,5 +33,5 @@ class FlashAttentionInfer : public mindspore::kernel::KernelInterface { const mindspore::schema::Primitive *primitive) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_ACL_INFER_FLASHATTENTIONINFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/acl/infer/forward_rasterize_infer.cc b/mindspore-lite/tools/converter/adapter/acl/infer/forward_rasterize_infer.cc index b85d4eb4..0c658ea2 100644 --- a/mindspore-lite/tools/converter/adapter/acl/infer/forward_rasterize_infer.cc +++ b/mindspore-lite/tools/converter/adapter/acl/infer/forward_rasterize_infer.cc @@ -21,7 +21,7 @@ #include "tools/converter/adapter/acl/common/acl_types.h" #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { namespace { constexpr uint32_t kNumOutputShape0 = 1; @@ -81,9 +81,9 @@ Status ForwardRasterizeInfer::Infer(std::vector *inputs, st return kSuccess; } } // namespace kernel -} // namespace mindspore -namespace mindspore { +} // namespace mindspore::lite +namespace mindspore::lite { namespace kernel { REGISTER_CUSTOM_KERNEL_INTERFACE(ACL, ForwardRasterize, ForwardRasterizeInferCreater); } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/acl/infer/forward_rasterize_infer.h b/mindspore-lite/tools/converter/adapter/acl/infer/forward_rasterize_infer.h index 8630faa2..c8cb6754 100644 --- a/mindspore-lite/tools/converter/adapter/acl/infer/forward_rasterize_infer.h +++ b/mindspore-lite/tools/converter/adapter/acl/infer/forward_rasterize_infer.h @@ -21,7 +21,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class ForwardRasterizeInfer : public mindspore::kernel::KernelInterface { public: @@ -33,5 +33,5 @@ class ForwardRasterizeInfer : public mindspore::kernel::KernelInterface { const mindspore::schema::Primitive *primitive) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_ACL_INFER_FORWARDRASTERIZE_H_ diff --git a/mindspore-lite/tools/converter/adapter/acl/plugin/acl_pass_plugin.cc b/mindspore-lite/tools/converter/adapter/acl/plugin/acl_pass_plugin.cc index ec9a7b3c..2ae44e86 100644 --- a/mindspore-lite/tools/converter/adapter/acl/plugin/acl_pass_plugin.cc +++ b/mindspore-lite/tools/converter/adapter/acl/plugin/acl_pass_plugin.cc @@ -23,7 +23,7 @@ #include "extendrt/cxx_api/dlutils.h" #endif -namespace mindspore { +namespace 
mindspore::lite { namespace opt { std::mutex AclPassPlugin::mutex_; @@ -104,4 +104,4 @@ std::shared_ptr AclPassPlugin::CreateAclPassInner(const std::shared_ptr -namespace mindspore { +namespace mindspore::lite { namespace opt { class AclCustomOppInstaller { public: static bool InstallCustomOpp(const std::string &custom_opp_path, const std::string &cann_opp_path); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_ACL_SRC_ACL_CUSTOM_OPP_INSTALLER_H_ diff --git a/mindspore-lite/tools/converter/adapter/acl/src/acl_memory_offload_pass_impl.cc b/mindspore-lite/tools/converter/adapter/acl/src/acl_memory_offload_pass_impl.cc index 6471a73f..3b7d91c2 100644 --- a/mindspore-lite/tools/converter/adapter/acl/src/acl_memory_offload_pass_impl.cc +++ b/mindspore-lite/tools/converter/adapter/acl/src/acl_memory_offload_pass_impl.cc @@ -28,7 +28,7 @@ constexpr auto kCustomPrimTypeACL = "ACL"; constexpr auto kFuncType = "func_type"; constexpr auto kUniqueName = "uniq_name"; } // namespace -namespace mindspore { +namespace mindspore::lite { namespace opt { std::shared_ptr AclMemoryOffloadPassImpl::CreateCustomPrim() { auto custom_prim = std::make_shared(); @@ -130,4 +130,4 @@ bool AclMemoryOffloadPassImpl::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/acl/src/acl_memory_offload_pass_impl.h b/mindspore-lite/tools/converter/adapter/acl/src/acl_memory_offload_pass_impl.h index b9b0c207..ce9c97ff 100644 --- a/mindspore-lite/tools/converter/adapter/acl/src/acl_memory_offload_pass_impl.h +++ b/mindspore-lite/tools/converter/adapter/acl/src/acl_memory_offload_pass_impl.h @@ -20,7 +20,7 @@ #include #include "tools/converter/adapter/acl/src/acl_pass_impl.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class AclMemoryOffloadPassImpl : public AclPassImpl { public: @@ -38,5 +38,5 @@ class AclMemoryOffloadPassImpl : public AclPassImpl { FuncGraphPtr CreateSingleOpFuncGraph(const CNodePtr &cnode); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_ACL_SRC_ACL_MEM_OFFLOAD_PASS_IMPL_H_ diff --git a/mindspore-lite/tools/converter/adapter/acl/src/acl_pass_impl.cc b/mindspore-lite/tools/converter/adapter/acl/src/acl_pass_impl.cc index 1f5a5eb5..e6d8aa3e 100644 --- a/mindspore-lite/tools/converter/adapter/acl/src/acl_pass_impl.cc +++ b/mindspore-lite/tools/converter/adapter/acl/src/acl_pass_impl.cc @@ -105,7 +105,7 @@ #include "cxx_api/graph/acl/acl_convert_init_adapter.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { static const std::set kAdjustCnodeName = {"Resize", "Conv2dTransposeFusion", "Concat"}; static const std::map kEnumFormatToStrMap = {{Format::NCHW, "NCHW"}, {Format::NHWC, "NHWC"}}; @@ -1466,4 +1466,4 @@ bool AclPassImpl::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/acl/src/acl_pass_impl.h b/mindspore-lite/tools/converter/adapter/acl/src/acl_pass_impl.h index 74b72e38..bdc0709b 100644 --- a/mindspore-lite/tools/converter/adapter/acl/src/acl_pass_impl.h +++ b/mindspore-lite/tools/converter/adapter/acl/src/acl_pass_impl.h @@ -30,7 +30,7 @@ #include "infer/custom.h" #include 
"tools/converter/cxx_api/converter_para.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { using mindspore::converter::FmkType; using mindspore::lite::STATUS; @@ -94,5 +94,5 @@ class AclPassImpl { bool is_ptq_quant_ = false; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_ACL_SRC_ACL_PASS_IMPL_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/activation_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/activation_checker.cc index affa9c26..f91fbc8f 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/activation_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/activation_checker.cc @@ -20,7 +20,7 @@ #include "mindapi/base/types.h" #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { const std::unordered_set kSupportedActivationTypes = { @@ -53,4 +53,4 @@ bool ActivationChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::F OpCheckerRegistrar g_ActivationChecker("Activation", new ActivationChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/activation_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/activation_checker.h index aac5a3d0..c6b508ca 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/activation_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/activation_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ActivationChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class ActivationChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_ACTIVATION_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/argmax_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/argmax_checker.cc index f4b573fc..beeba7f9 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/argmax_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/argmax_checker.cc @@ -21,7 +21,7 @@ #include "common/op_attr.h" #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool ArgMaxChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) { if (!CheckInputW(op, 1, format, kMaxInputWOf4Dims)) { @@ -61,4 +61,4 @@ bool ArgMaxChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Forma OpCheckerRegistrar g_ArgMaxChecker("ArgMaxFusion", new ArgMaxChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/argmax_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/argmax_checker.h index 4ab803ed..ce275502 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/argmax_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/argmax_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ArgMaxChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class ArgMaxChecker : public OpChecker { bool 
Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_ARGMAX_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/arithmetic_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/arithmetic_checker.cc index 364b6234..2d48f917 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/arithmetic_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/arithmetic_checker.cc @@ -22,7 +22,7 @@ #include "common/check_base.h" #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool ArithmeticChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) { if (!CheckInputW(op, 1, format, kMaxInputWOf4Dims)) { @@ -94,4 +94,4 @@ OpCheckerRegistrar g_X_DIV_YChecker("X_DIV_Y", new ArithmeticChecker()); OpCheckerRegistrar g_X_LOG_YChecker("X_LOG_Y", new ArithmeticChecker()); OpCheckerRegistrar g_BiasAddChecker("BiasAdd", new ArithmeticChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/arithmetic_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/arithmetic_checker.h index eaae8519..248c2870 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/arithmetic_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/arithmetic_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ArithmeticChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class ArithmeticChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_ARITHMETIC_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/batchnorm_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/batchnorm_checker.cc index 3fd49e12..a8e2322d 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/batchnorm_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/batchnorm_checker.cc @@ -20,7 +20,7 @@ #include "common/op_enum.h" #include "checker/batchnorm_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool BatchNormChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) { if (!CheckInputW(op, 1, format, kMaxInputWOf4Dims)) { @@ -46,4 +46,4 @@ bool BatchNormChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Fo OpCheckerRegistrar g_BatchNormChecker("BatchNorm", new BatchNormChecker()); OpCheckerRegistrar g_FusedBatchNormChecker("FusedBatchNorm", new BatchNormChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/batchnorm_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/batchnorm_checker.h index b7b9f798..d4159e51 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/batchnorm_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/batchnorm_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class 
BatchNormChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class BatchNormChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_BATCHNORM_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/common_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/common_checker.cc index de1c226f..9b77c465 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/common_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/common_checker.cc @@ -19,7 +19,7 @@ #include #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool CommonChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) { if (!CheckInputW(op, 1, format, kMaxInputWOf4Dims)) { @@ -68,4 +68,4 @@ OpCheckerRegistrar g_MaxUnPoolChecker("MaxUnpool", new CommonChecker()); OpCheckerRegistrar g_CustomChecker("Custom", new CommonChecker()); OpCheckerRegistrar g_LSTMChecker("LSTM", new CommonChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/common_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/common_checker.h index 5a11b94e..d55c6580 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/common_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/common_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class CommonChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class CommonChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_COMMON_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/concat_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/concat_checker.cc index b82bf508..08ba9b68 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/concat_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/concat_checker.cc @@ -21,7 +21,7 @@ #include "common/anf_util.h" #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { bool CheckConcatInputW(const ShapeVector &input_shape, int64_t axis, int64_t input_w) { @@ -86,4 +86,4 @@ bool ConcatChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Forma OpCheckerRegistrar g_ConcatChecker("Concat", new ConcatChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/concat_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/concat_checker.h index d63695ec..caa23af9 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/concat_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/concat_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ConcatChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class ConcatChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, 
mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_CONCAT_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/conv2d_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/conv2d_checker.cc index e60845ca..587978f5 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/conv2d_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/conv2d_checker.cc @@ -23,7 +23,7 @@ #include "infer/cxx_api/conv2d_fusion.h" #include "infer/cxx_api/conv2d_transpose_fusion.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr int kMaxGroupNum = 2048; @@ -197,4 +197,4 @@ bool Conv2DFusionChecker::Check(api::CNodePtr op, int32_t output_num, mindspore: OpCheckerRegistrar g_Conv2DFusionChecker("Conv2DFusion", new Conv2DFusionChecker()); OpCheckerRegistrar g_Conv2dTransposeFusionChecker("Conv2dTransposeFusion", new Conv2DFusionChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/conv2d_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/conv2d_checker.h index beb057ad..fd47ab24 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/conv2d_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/conv2d_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class Conv2DFusionChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class Conv2DFusionChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_CONV2D_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/custom_op_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/custom_op_checker.cc index cc025c9e..d2761fcf 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/custom_op_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/custom_op_checker.cc @@ -18,7 +18,7 @@ #include #include -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool CustomOpChecker::Check(api::CNodePtr, int32_t output_num, mindspore::Format) { return true; } OpCheckerRegistrar g_DecBboxChecker("DecBBox", new CustomOpChecker()); @@ -34,4 +34,4 @@ OpCheckerRegistrar g_BiLstmChecker("BiLstm", new CustomOpChecker()); OpCheckerRegistrar g_GruChecker("Gru", new CustomOpChecker()); OpCheckerRegistrar g_NopChecker("Nop", new CustomOpChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/custom_op_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/custom_op_checker.h index d2609c1e..6d5a5aaf 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/custom_op_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/custom_op_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class CustomOpChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class CustomOpChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format 
format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_CUSTOM_OP_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/eltwise_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/eltwise_checker.cc index 86f6d239..9b5a3233 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/eltwise_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/eltwise_checker.cc @@ -19,7 +19,7 @@ #include #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr int kModeSize = 3; @@ -58,4 +58,4 @@ bool EltwiseChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Form OpCheckerRegistrar g_EltwiseChecker("Eltwise", new EltwiseChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/eltwise_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/eltwise_checker.h index c59ccbbe..015176f4 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/eltwise_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/eltwise_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class EltwiseChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class EltwiseChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_ELTWISE_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/exp_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/exp_checker.cc index b925894a..e2a7a639 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/exp_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/exp_checker.cc @@ -20,7 +20,7 @@ #include #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool ExpFusionChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) { if (!CheckInputW(op, kInputIndex1, format, kMaxInputWOf4Dims)) { @@ -46,4 +46,4 @@ bool ExpFusionChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Fo OpCheckerRegistrar g_ExpFusionChecker("ExpFusion", new ExpFusionChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/exp_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/exp_checker.h index 101730e3..c42b4ec3 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/exp_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/exp_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ExpFusionChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class ExpFusionChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_EXP_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/flatten_checker.cc 
b/mindspore-lite/tools/converter/adapter/dpico/checker/flatten_checker.cc index 5471ccf6..61bfe531 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/flatten_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/flatten_checker.cc @@ -20,7 +20,7 @@ #include "common/op_attr.h" #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr int kMaxFlattenInputW = 65536; @@ -54,4 +54,4 @@ bool FlattenChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Form } OpCheckerRegistrar g_FlattenChecker("Flatten", new FlattenChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/flatten_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/flatten_checker.h index f2135467..8798f624 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/flatten_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/flatten_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class FlattenChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class FlattenChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_FLATTEN_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/full_connection_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/full_connection_checker.cc index 9fdadf28..cc21bf53 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/full_connection_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/full_connection_checker.cc @@ -21,7 +21,7 @@ #include "common/op_attr.h" #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool FullConnectionChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) { if (!CheckInputW(op, 1, format, kMaxInputWOf4Dims)) { @@ -62,4 +62,4 @@ bool FullConnectionChecker::Check(api::CNodePtr op, int32_t output_num, mindspor OpCheckerRegistrar g_FullConnectionChecker("FullConnection", new FullConnectionChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/full_connection_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/full_connection_checker.h index 23394162..412b4bee 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/full_connection_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/full_connection_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class FullConnectionChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class FullConnectionChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_FULL_CONNECTION_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/log_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/log_checker.cc index 09eb4ba0..6314fbc2 100644 
--- a/mindspore-lite/tools/converter/adapter/dpico/checker/log_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/log_checker.cc @@ -20,7 +20,7 @@ #include #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool LogChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) { if (!CheckInputW(op, kInputIndex1, format, kMaxInputWOf4Dims)) { @@ -48,4 +48,4 @@ bool LogChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format f } OpCheckerRegistrar g_LogChecker("Log", new LogChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/log_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/log_checker.h index 3d591e30..126f0f9f 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/log_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/log_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class LogChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class LogChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_LOG_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/lrn_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/lrn_checker.cc index 2e139c2e..c90caf9d 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/lrn_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/lrn_checker.cc @@ -19,7 +19,7 @@ #include #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr int local_size_3 = 3; @@ -58,4 +58,4 @@ bool LRNChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format f OpCheckerRegistrar g_LRNChecker("LRN", new LRNChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/lrn_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/lrn_checker.h index 7ff17df6..a25e7071 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/lrn_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/lrn_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class LRNChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class LRNChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_LRN_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/lstm_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/lstm_checker.cc index 3ee423ab..63c28408 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/lstm_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/lstm_checker.cc @@ -18,7 +18,7 @@ #include #include "common/op_attr.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr uint32_t kLstmMaxNumOutput = 5456; @@ -43,4 +43,4 @@ bool 
LstmChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format) OpCheckerRegistrar g_LstmChecker("Lstm", new LstmChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/lstm_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/lstm_checker.h index 7a59deb3..1b2b6ff4 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/lstm_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/lstm_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class LstmChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class LstmChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_LSTM_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/mat_mul_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/mat_mul_checker.cc index 569326f7..706ec2ba 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/mat_mul_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/mat_mul_checker.cc @@ -21,7 +21,7 @@ #include "common/op_attr.h" #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { bool CheckInputShapeForMatrix(const api::CNodePtr &cnode, const api::PrimitivePtr &primitive) { @@ -110,4 +110,4 @@ bool MatMulChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Forma OpCheckerRegistrar g_GemmChecker("Gemm", new MatMulChecker()); OpCheckerRegistrar g_MatMulChecker("MatMulFusion", new MatMulChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/mat_mul_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/mat_mul_checker.h index 1d807795..bb03abb3 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/mat_mul_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/mat_mul_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class MatMulChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class MatMulChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_MAT_MUL_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/mvn_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/mvn_checker.cc index 37438101..1213fbeb 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/mvn_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/mvn_checker.cc @@ -21,7 +21,7 @@ #include "common/op_enum.h" #include "common/fetch_content.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool MvnChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) { if (!CheckInputW(op, kInputIndex1, format, kMaxInputWOf4Dims)) { @@ -47,4 +47,4 @@ bool MvnChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format f OpCheckerRegistrar g_MvnChecker("Mvn", new 
MvnChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/mvn_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/mvn_checker.h index f2d0cb8a..84e6e71e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/mvn_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/mvn_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class MvnChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class MvnChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_MVN_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/op_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/op_checker.cc index ce337f32..e2f16d8e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/op_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/op_checker.cc @@ -21,7 +21,7 @@ #include "common/op_enum.h" #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { OpCheckerRegistry::~OpCheckerRegistry() { for (auto ite : checkers) { @@ -130,4 +130,4 @@ bool CheckInputW(const api::CNodePtr &op, size_t index, mindspore::Format format return true; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/op_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/op_checker.h index 56815a0d..a3eb2c09 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/op_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/op_checker.h @@ -32,7 +32,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::STATUS; -namespace mindspore { +namespace mindspore::lite { namespace dpico { class OpChecker { public: @@ -70,6 +70,6 @@ class OpCheckerRegistrar { ~OpCheckerRegistrar() = default; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_OP_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/pooling_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/pooling_checker.cc index 1b9a5f18..bdbe0cd6 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/pooling_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/pooling_checker.cc @@ -21,7 +21,7 @@ #include "common/anf_util.h" #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr int kMaxGroupNum = 2048; @@ -135,4 +135,4 @@ bool PoolingChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Form OpCheckerRegistrar g_AvgPoolFusionChecker("AvgPoolFusion", new PoolingChecker()); OpCheckerRegistrar g_MaxPoolFusionChecker("MaxPoolFusion", new PoolingChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/pooling_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/pooling_checker.h index a28a9c5e..066442a8 100644 --- 
a/mindspore-lite/tools/converter/adapter/dpico/checker/pooling_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/pooling_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class PoolingChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class PoolingChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_POOLING_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/pow_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/pow_checker.cc index 2e713335..c75733bc 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/pow_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/pow_checker.cc @@ -21,7 +21,7 @@ #include "common/fetch_content.h" #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool PowFusionChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) { if (!CheckInputW(op, kInputIndex1, format, kMaxInputWOf4Dims)) { @@ -63,4 +63,4 @@ bool PowFusionChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Fo OpCheckerRegistrar g_PowFusionChecker("PowFusion", new PowFusionChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/pow_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/pow_checker.h index 1fa9cac3..a5a10b5c 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/pow_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/pow_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class PowFusionChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class PowFusionChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_POW_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/reduce_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/reduce_checker.cc index a68a7da1..4a368bc3 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/reduce_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/reduce_checker.cc @@ -24,7 +24,7 @@ #include "common/anf_util.h" #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { STATUS GetAxesSet(const api::CNodePtr &op, const ShapeVector &input_shape, const api::PrimitivePtr &primitive, @@ -130,4 +130,4 @@ bool ReduceChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Forma OpCheckerRegistrar g_ReduceChecker("ReduceFusion", new ReduceChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/reduce_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/reduce_checker.h index ae001b34..78537407 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/reduce_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/reduce_checker.h @@ 
-20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ReduceChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class ReduceChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_REDUCE_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/reshape_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/reshape_checker.cc index 55d95d05..3088fbbe 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/reshape_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/reshape_checker.cc @@ -22,7 +22,7 @@ #include "common/op_enum.h" #include "common/fetch_content.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr int kMaxReshapeInputW = 65536; @@ -95,4 +95,4 @@ bool ReshapeChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Form OpCheckerRegistrar g_ReshapeChecker("Reshape", new ReshapeChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/reshape_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/reshape_checker.h index 38e3a1c2..6f69d327 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/reshape_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/reshape_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ReshapeChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class ReshapeChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_RESHAPE_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/resize_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/resize_checker.cc index 53dab666..dda39321 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/resize_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/resize_checker.cc @@ -24,7 +24,7 @@ #include "mindapi/base/types.h" #include "include/registry/converter_context.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr int kMaxOutputWOf4Dims = 2048; @@ -175,4 +175,4 @@ bool ResizeChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Forma OpCheckerRegistrar g_ResizeChecker("Resize", new ResizeChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/resize_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/resize_checker.h index 21cca6f2..8b24c0ad 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/resize_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/resize_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ResizeChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class ResizeChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, 
mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_RESIZE_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/reverse_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/reverse_checker.cc index ff720f75..8ab2b63e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/reverse_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/reverse_checker.cc @@ -20,7 +20,7 @@ #include #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool ReverseChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) { if (!CheckInputW(op, kInputIndex1, format, kMaxInputWOf4Dims)) { @@ -45,4 +45,4 @@ bool ReverseChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Form OpCheckerRegistrar g_ReverseChecker("ReverseV2", new ReverseChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/reverse_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/reverse_checker.h index e5fcf17d..f0cc0cd6 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/reverse_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/reverse_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ReverseChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class ReverseChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_REVERSE_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/scale_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/scale_checker.cc index 9d6da8b4..98af9d28 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/scale_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/scale_checker.cc @@ -20,7 +20,7 @@ #include #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr int kNegativeAxisCorrespondZero = -4; @@ -51,4 +51,4 @@ bool ScaleChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format OpCheckerRegistrar g_ScaleChecker("ScaleFusion", new ScaleChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/scale_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/scale_checker.h index 65aa51b9..945536d6 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/scale_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/scale_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ScaleChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class ScaleChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_SCALE_CHECKER_H_ diff --git 
a/mindspore-lite/tools/converter/adapter/dpico/checker/slice_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/slice_checker.cc index 900f4d3c..de990205 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/slice_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/slice_checker.cc @@ -19,7 +19,7 @@ #include #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr int kMaxSplitSize = 31; @@ -57,4 +57,4 @@ bool SliceChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format OpCheckerRegistrar g_SliceChecker("SliceFusion", new SliceChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/slice_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/slice_checker.h index a72c1c69..4743f289 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/slice_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/slice_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class SliceChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class SliceChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_SLICE_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/softmax_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/softmax_checker.cc index ce8c0b6e..6e8700fa 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/softmax_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/softmax_checker.cc @@ -21,7 +21,7 @@ #include "common/op_enum.h" #include "common/check_base.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr int kMaxVectorC = 65536; @@ -89,4 +89,4 @@ bool SoftmaxChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Form OpCheckerRegistrar g_SoftmaxChecker("Softmax", new SoftmaxChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/softmax_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/softmax_checker.h index 1386d487..85a1a299 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/softmax_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/softmax_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class SoftmaxChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class SoftmaxChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_SOFTMAX_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/split_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/split_checker.cc index 55124a92..81c96383 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/split_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/split_checker.cc @@ -18,7 +18,7 @@ #include 
#include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool SplitChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) { if (output_num > kMaxTopNum) { @@ -34,4 +34,4 @@ bool SplitChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format OpCheckerRegistrar g_SplitChecker("Split", new SplitChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/split_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/split_checker.h index 140beec5..12982d71 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/split_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/split_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class SplitChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class SplitChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_SPLIT_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/spp_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/spp_checker.cc index 223feedb..d17f9e5e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/spp_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/spp_checker.cc @@ -18,7 +18,7 @@ #include #include "common/op_attr.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool SppChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format) { auto primitive = api::GetValueNode(op->input(0)); @@ -38,4 +38,4 @@ bool SppChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format) OpCheckerRegistrar g_SppChecker("Spp", new SppChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/spp_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/spp_checker.h index 09c97507..3db1eb27 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/spp_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/spp_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class SppChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class SppChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_SPP_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/squeeze_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/squeeze_checker.cc index 11a243f5..8e38debc 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/squeeze_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/squeeze_checker.cc @@ -20,7 +20,7 @@ #include "common/op_attr.h" #include "common/anf_util.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool SqueezeChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format) { std::vector output_shapes; @@ -41,4 +41,4 @@ bool 
SqueezeChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Form OpCheckerRegistrar g_SqueezeChecker("Squeeze", new SqueezeChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/squeeze_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/squeeze_checker.h index cc092833..4c942a6e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/squeeze_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/squeeze_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class SqueezeChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class SqueezeChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_SQUEEZE_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/strided_slice_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/strided_slice_checker.cc index 40908570..a85ae93d 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/strided_slice_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/strided_slice_checker.cc @@ -19,7 +19,7 @@ #include #include "include/registry/converter_context.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool StridedSliceChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format) { auto primitive = api::GetValueNode(op->input(0)); @@ -39,4 +39,4 @@ bool StridedSliceChecker::Check(api::CNodePtr op, int32_t output_num, mindspore: OpCheckerRegistrar g_StridedSliceChecker("StridedSlice", new StridedSliceChecker()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/strided_slice_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/strided_slice_checker.h index 6e698aeb..d5b4d4f7 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/strided_slice_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/strided_slice_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class StridedSliceChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class StridedSliceChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_STRIDED_SLICE_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/transpose_checker.cc b/mindspore-lite/tools/converter/adapter/dpico/checker/transpose_checker.cc index 61cef9c2..1b7d657e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/transpose_checker.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/transpose_checker.cc @@ -23,7 +23,7 @@ #include "common/op_attr.h" #include "common/op_enum.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool TransposeChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) { if (!CheckInputW(op, kInputIndex1, format, kMaxInputWOf4Dims)) { @@ -73,4 +73,4 @@ bool 
TransposeChecker::Check(api::CNodePtr op, int32_t output_num, mindspore::Fo OpCheckerRegistrar g_TransposeChecker("Transpose", new TransposeChecker()); // Permute } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/checker/transpose_checker.h b/mindspore-lite/tools/converter/adapter/dpico/checker/transpose_checker.h index d62271e9..dd57e5ba 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/checker/transpose_checker.h +++ b/mindspore-lite/tools/converter/adapter/dpico/checker/transpose_checker.h @@ -20,7 +20,7 @@ #include #include "checker/op_checker.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class TransposeChecker : public OpChecker { public: @@ -29,6 +29,6 @@ class TransposeChecker : public OpChecker { bool Check(api::CNodePtr op, int32_t output_num, mindspore::Format format) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_CHECKER_TRANSPOSE_CHECKER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/anf_util.cc b/mindspore-lite/tools/converter/adapter/dpico/common/anf_util.cc index f9bb0c8a..0671352d 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/anf_util.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/common/anf_util.cc @@ -29,12 +29,12 @@ #include "infer/tuple_get_item.h" #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" #include "common/check_base.h" -namespace mindspore { +namespace mindspore::lite { namespace ops { class PrimitiveC; } -} // namespace mindspore -namespace mindspore { +} // namespace mindspore::lite +namespace mindspore::lite { namespace dpico { namespace { const std::map kTypeMap = { @@ -798,4 +798,4 @@ STATUS GetShapeVectorFromStringTensor(const api::TensorPtr &tensor_info, ShapeVe return RET_OK; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/anf_util.h b/mindspore-lite/tools/converter/adapter/dpico/common/anf_util.h index 2651a4c0..5f290b05 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/anf_util.h +++ b/mindspore-lite/tools/converter/adapter/dpico/common/anf_util.h @@ -30,7 +30,7 @@ using mindspore::lite::RET_NO_CHANGE; using mindspore::lite::RET_OK; using mindspore::lite::STATUS; -namespace mindspore { +namespace mindspore::lite { namespace dpico { bool CheckPrimitiveType(const api::AnfNodePtr &node, const api::PrimitivePtr &primitive_type); STATUS GetPrimitiveType(const api::AnfNodePtr &node, std::string *name); @@ -75,6 +75,6 @@ inline size_t IntToSize(int u) { return static_cast(u); } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_ANF_UTIL_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/data_transpose_utils.cc b/mindspore-lite/tools/converter/adapter/dpico/common/data_transpose_utils.cc index 9b3f7c5e..f0c5d171 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/data_transpose_utils.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/common/data_transpose_utils.cc @@ -24,7 +24,7 @@ #include "common/float16.h" #include "mindapi/base/logging.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { const std::unordered_map kTensorFormatMap{ @@ -221,4 +221,4 @@ void TransposeMatrix(float *matrix, int row, int col) { } } } // 
namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/data_transpose_utils.h b/mindspore-lite/tools/converter/adapter/dpico/common/data_transpose_utils.h index ac0e9562..a429f42e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/data_transpose_utils.h +++ b/mindspore-lite/tools/converter/adapter/dpico/common/data_transpose_utils.h @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NO_CHANGE; using mindspore::lite::RET_OK; using mindspore::lite::STATUS; -namespace mindspore { +namespace mindspore::lite { namespace dpico { inline const std::vector kNH2NC = {0, 3, 1, 2}; inline const std::vector kNC2NH = {0, 2, 3, 1}; @@ -84,5 +84,5 @@ STATUS TransFilterFormat(const mindspore::api::TensorPtr &tensor, mindspore::For void TransposeMatrix(float *matrix, int row, int col); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_DATA_TRANSPOSE_UTILS_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/fetch_content.cc b/mindspore-lite/tools/converter/adapter/dpico/common/fetch_content.cc index 806285d3..4e991e46 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/fetch_content.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/common/fetch_content.cc @@ -21,7 +21,7 @@ #include "common/check_base.h" #include "include/securec.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr size_t kTensorListMinSize = 3 * sizeof(int32_t); @@ -106,4 +106,4 @@ int GetDataSizeFromTensor(DataInfo *data_info, int *data_size) { return RET_OK; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/fetch_content.h b/mindspore-lite/tools/converter/adapter/dpico/common/fetch_content.h index b1c3cba2..3ffd3415 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/fetch_content.h +++ b/mindspore-lite/tools/converter/adapter/dpico/common/fetch_content.h @@ -22,7 +22,7 @@ #include "mindapi/ir/primitive.h" #include "mindapi/ir/func_graph.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { struct DataInfo { int data_type_; @@ -37,5 +37,5 @@ int FetchDataFromParameterNode(const api::CNodePtr &cnode, size_t index, DataInf int GetDataSizeFromTensor(DataInfo *data_info, int *data_size); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_FETCH_CONTENT_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/file_util.cc b/mindspore-lite/tools/converter/adapter/dpico/common/file_util.cc index 67e73354..36389a88 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/file_util.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/common/file_util.cc @@ -23,7 +23,7 @@ #include #include "mindapi/base/logging.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { #ifdef _WIN32 @@ -171,4 +171,4 @@ int RemoveDir(const std::string &path) { #endif } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/file_util.h b/mindspore-lite/tools/converter/adapter/dpico/common/file_util.h index e1eea461..2b834bc6 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/file_util.h +++ 
b/mindspore-lite/tools/converter/adapter/dpico/common/file_util.h @@ -37,7 +37,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::STATUS; -namespace mindspore { +namespace mindspore::lite { namespace dpico { constexpr size_t kMaximumNumOfFolders = 1000; @@ -77,5 +77,5 @@ int CreateDir(std::string *file_path); int ReadFileToIfstream(const std::string &file_path, std::ifstream *ifstream); int RemoveDir(const std::string &path); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_FILE_UTIL_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/float16.h b/mindspore-lite/tools/converter/adapter/dpico/common/float16.h index f7b970ff..8df4fdd0 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/float16.h +++ b/mindspore-lite/tools/converter/adapter/dpico/common/float16.h @@ -31,7 +31,7 @@ using float16 = float16_t; #include // Implement Float16 for mindspore, inspired by Eigen::half. -namespace mindspore { +namespace mindspore::lite { class Float16 { public: static constexpr uint16_t value_mask = 0x7fff; @@ -217,7 +217,7 @@ inline bool operator>=(const Float16 &a, const Float16 &b) { return static_cast< inline std::ostream &operator<<(std::ostream &os, const Float16 &v) { return (os << static_cast(v)); } -} // namespace mindspore +} // namespace mindspore::lite using float16 = mindspore::Float16; diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/format_utils.cc b/mindspore-lite/tools/converter/adapter/dpico/common/format_utils.cc index 85acb304..15f04fc1 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/format_utils.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/common/format_utils.cc @@ -52,7 +52,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_name_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_name_t.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { const std::set kAssignedFormatOpSet = { @@ -87,4 +87,4 @@ std::string FormatEnumToString(mindspore::Format format) { return names[static_cast(format)]; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/format_utils.h b/mindspore-lite/tools/converter/adapter/dpico/common/format_utils.h index 9cc91b3e..6c7beb8a 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/format_utils.h +++ b/mindspore-lite/tools/converter/adapter/dpico/common/format_utils.h @@ -23,12 +23,12 @@ #include "include/api/format.h" #include "common/anf_util.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { const std::set &GetAssignedFormatOpSet(); bool IsSpecialType(const api::CNodePtr &cnode); std::string FormatEnumToString(mindspore::Format format); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_FORMAT_UTILS_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/graph_output_name_keeper.cc b/mindspore-lite/tools/converter/adapter/dpico/common/graph_output_name_keeper.cc index fac3072f..40a69fde 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/graph_output_name_keeper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/common/graph_output_name_keeper.cc @@ -25,7 +25,7 @@ #include "infer/depend.h" #include "include/registry/converter_context.h" -namespace mindspore { +namespace 
mindspore::lite { namespace dpico { GraphOutputNameKeeper *GraphOutputNameKeeper::GetInstance() { static GraphOutputNameKeeper instance; @@ -149,4 +149,4 @@ std::string GraphOutputNameKeeper::GetAnfOutputNameFromOm(const std::string &om_ return om_out_name; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/graph_output_name_keeper.h b/mindspore-lite/tools/converter/adapter/dpico/common/graph_output_name_keeper.h old mode 100755 new mode 100644 index e36d4183..942f45ce --- a/mindspore-lite/tools/converter/adapter/dpico/common/graph_output_name_keeper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/common/graph_output_name_keeper.h @@ -22,7 +22,7 @@ #include #include "mindapi/ir/func_graph.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class GraphOutputNameKeeper { public: @@ -46,5 +46,5 @@ class GraphOutputNameKeeper { std::map ori_output_info_; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_DPICO_COMMON_GRAPH_OUTPUT_NAME_KEEPER_H diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/infer_util.cc b/mindspore-lite/tools/converter/adapter/dpico/common/infer_util.cc index 71b1c07d..4698cc1a 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/infer_util.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/common/infer_util.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace dpico { int CheckCustomInputOutput(const std::vector *inputs, const std::vector *outputs, const schema::Primitive *primitive) { @@ -111,4 +111,4 @@ int GetOmNetType(const schema::Primitive *primitive, OmNetType *om_net_type) { return RET_OK; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/infer_util.h b/mindspore-lite/tools/converter/adapter/dpico/common/infer_util.h index a0629949..f66f0b94 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/infer_util.h +++ b/mindspore-lite/tools/converter/adapter/dpico/common/infer_util.h @@ -23,13 +23,13 @@ #include "schema/model_generated.h" #include "src/graph_split_info.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { int CheckCustomInputOutput(const std::vector *inputs, const std::vector *outputs, const schema::Primitive *primitive); int CheckCustomParam(const schema::Custom *param, const std::string ¶m_name); int GetOmNetType(const schema::Primitive *primitive, OmNetType *om_net_type); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_INFER_UTIL_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/op_attr.h b/mindspore-lite/tools/converter/adapter/dpico/common/op_attr.h index 5fc55c9f..0f6ce6c4 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/op_attr.h +++ b/mindspore-lite/tools/converter/adapter/dpico/common/op_attr.h @@ -17,7 +17,7 @@ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_OP_ATTR_H_ #define MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_OP_ATTR_H_ -namespace mindspore { +namespace mindspore::lite { namespace dpico { constexpr auto kAcrossSpatial = "across_spatial"; constexpr auto kAcrossChannels = "across_channels"; @@ -125,5 +125,5 @@ constexpr auto kUseDefaultInitialHFlag = 
"use_default_initial_h_flag"; constexpr auto kUseGlobalStats = "use_global_stats"; constexpr auto kZoomFactor = "zoom_factor"; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_OP_ATTR_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/op_enum.h b/mindspore-lite/tools/converter/adapter/dpico/common/op_enum.h index 4d4e9df4..8e6197ca 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/op_enum.h +++ b/mindspore-lite/tools/converter/adapter/dpico/common/op_enum.h @@ -17,7 +17,7 @@ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_OP_ENUM_H_ #define MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_OP_ENUM_H_ -namespace mindspore { +namespace mindspore::lite { namespace dpico { constexpr size_t kDims1 = 1; constexpr size_t kDims2 = 2; @@ -45,6 +45,6 @@ constexpr int kAxisLowerBound = -4; constexpr int kAxisUpperBound = 3; constexpr size_t kMaxLineCount = 9999; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_OP_ENUM_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/string_util.cc b/mindspore-lite/tools/converter/adapter/dpico/common/string_util.cc index 5d790f1d..61c0b749 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/string_util.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/common/string_util.cc @@ -21,7 +21,7 @@ #include #include "mindapi/base/logging.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { int EraseBlankSpace(std::string *input_string) { if (input_string == nullptr) { @@ -97,4 +97,4 @@ bool IsValidDoubleNum(const std::string &num_str) { return iss.eof() && !iss.fail(); } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/common/string_util.h b/mindspore-lite/tools/converter/adapter/dpico/common/string_util.h index 89386a54..e34f1b97 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/common/string_util.h +++ b/mindspore-lite/tools/converter/adapter/dpico/common/string_util.h @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::STATUS; -namespace mindspore { +namespace mindspore::lite { namespace dpico { int EraseBlankSpace(std::string *input_string); int EraseHeadTailSpace(std::string *input_string); @@ -37,5 +37,5 @@ std::string ReplaceSpecifiedChar(const std::string &origin_str, char origin_ch, bool IsValidUnsignedNum(const std::string &num_str); bool IsValidDoubleNum(const std::string &num_str); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_COMMON_STRING_UTIL_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_common_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_common_infer.cc index 72acfc01..8564e79b 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_common_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_common_infer.cc @@ -25,7 +25,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { std::shared_ptr DpicoCommonInferCreater() { std::shared_ptr infer = std::make_shared(); @@ -83,4 +83,4 @@ REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, Xor, DpicoCommonInferCreater) 
REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, Mish, DpicoCommonInferCreater) REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, Custom, DpicoCommonInferCreater) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_common_infer.h b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_common_infer.h index c79734a5..62c27eb1 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_common_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_common_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoCommonInterface : public KernelInterface { public: @@ -32,6 +32,6 @@ class DpicoCommonInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_INFER_DPICO_COMMON_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_custom_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_custom_infer.cc index cdabbaa9..88532da9 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_custom_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_custom_infer.cc @@ -32,7 +32,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::schema::PrimitiveType_Custom; -namespace mindspore { +namespace mindspore::lite { namespace kernel { namespace { constexpr int kOmParameterNum = 1; @@ -242,4 +242,4 @@ std::shared_ptr CustomInferCreater() { } REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, DPICO, CustomInferCreater); } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_custom_infer.h b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_custom_infer.h index 07b176b9..c3fe29df 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_custom_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_custom_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class CustomInterface : public KernelInterface { public: @@ -32,5 +32,5 @@ class CustomInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_INFER_DPICO_CUSTOM_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_decbbox_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_decbbox_infer.cc index 6d2e93a1..bea2c97a 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_decbbox_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_decbbox_infer.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { namespace { constexpr int kDimsOfBbox = 6; // [xmin, ymin, xmax, ymax, score class_id] @@ -97,4 +97,4 @@ Status DpicoDecBBoxInterface::Infer(std::vector *inputs, st } REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, DecBBox, DpicoDecBBoxInferCreater) } // namespace kernel -} // namespace mindspore +} // namespace 
mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_decbbox_infer.h b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_decbbox_infer.h index 77466fca..0d843a9e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_decbbox_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_decbbox_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoDecBBoxInterface : public KernelInterface { public: @@ -32,6 +32,6 @@ class DpicoDecBBoxInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_INFER_DPICO_DECBBOX_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_detection_output_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_detection_output_infer.cc index 77c90b0f..65f528ce 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_detection_output_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_detection_output_infer.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { namespace { constexpr int kDimensionOfBbox = 7; // [image_id, label, confidence, xmin, ymin, xmax, ymax] @@ -60,4 +60,4 @@ Status DpicoDetectionOutputInterface::Infer(std::vector *in } REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, DetectionOutput, DpicoDetectionOutputInferCreater) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_detection_output_infer.h b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_detection_output_infer.h index c9cf4166..8260ab04 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_detection_output_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_detection_output_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoDetectionOutputInterface : public KernelInterface { public: @@ -32,6 +32,6 @@ class DpicoDetectionOutputInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_INFER_DPICO_DETECTION_OUTPUT_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_extract_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_extract_infer.cc index 45dbc079..e361100e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_extract_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_extract_infer.cc @@ -30,7 +30,7 @@ using mindspore::kernel::KernelInterface; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { std::shared_ptr DpicoExtractInferCreater() { std::shared_ptr infer = std::make_shared(); @@ -115,4 +115,4 @@ Status DpicoExtractInterface::Infer(std::vector *inputs, st } REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, Extract, DpicoExtractInferCreater) } // namespace kernel -} // namespace mindspore 
+} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_extract_infer.h b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_extract_infer.h index b1885199..8bdb5871 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_extract_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_extract_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoExtractInterface : public KernelInterface { public: @@ -32,5 +32,5 @@ class DpicoExtractInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_INFER_DPICO_EXTRACT_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_infer.cc index 15993c65..9ff458bf 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_infer.cc @@ -28,7 +28,7 @@ using mindspore::kernel::KernelInterface; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { namespace { constexpr size_t kHiddenLayerSize = 4; @@ -74,4 +74,4 @@ Status DpicoLstmInterface::Infer(std::vector *inputs, std:: } REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, Lstm, DpicoLstmInferCreater) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_infer.h b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_infer.h index 24d41703..b57ff7bd 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoLstmInterface : public KernelInterface { public: @@ -32,5 +32,5 @@ class DpicoLstmInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_INFER_DPICO_LSTM_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_onnx_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_onnx_infer.cc old mode 100755 new mode 100644 index 60d4f0f0..03e74436 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_onnx_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_onnx_infer.cc @@ -31,7 +31,7 @@ using mindspore::kernel::KernelInterface; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { std::shared_ptr DpicoLSTMOnnxInferCreater() { std::shared_ptr infer = std::make_shared(); @@ -98,4 +98,4 @@ Status DpicoLSTMOnnxInterface::Infer(std::vector *inputs, } REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, LSTM, DpicoLSTMOnnxInferCreater) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_onnx_infer.h 
b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_onnx_infer.h old mode 100755 new mode 100644 index 0f3ea3ff..e621797a --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_onnx_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_lstm_onnx_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoLSTMOnnxInterface : public KernelInterface { public: @@ -32,5 +32,5 @@ class DpicoLSTMOnnxInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // DPICO_INFER_DPICO_LSTMONNX_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_maxunpool_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_maxunpool_infer.cc old mode 100755 new mode 100644 index de0b3b8d..0ac66d70 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_maxunpool_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_maxunpool_infer.cc @@ -31,7 +31,7 @@ using mindspore::kernel::KernelInterface; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { std::shared_ptr DpicoMaxunpoolInferCreater() { std::shared_ptr infer = std::make_shared(); @@ -121,4 +121,4 @@ Status DpicoMaxunpoolInterface::Infer(std::vector *inputs, } REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, MaxUnpool, DpicoMaxunpoolInferCreater) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_maxunpool_infer.h b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_maxunpool_infer.h old mode 100755 new mode 100644 index f145726d..69017a24 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_maxunpool_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_maxunpool_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoMaxunpoolInterface : public KernelInterface { public: @@ -32,5 +32,5 @@ class DpicoMaxunpoolInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // DPICO_INFER_DPICO_MAXUNPOOL_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_passthrough_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_passthrough_infer.cc index 10fd61bc..385cf09f 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_passthrough_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_passthrough_infer.cc @@ -30,7 +30,7 @@ using mindspore::kernel::KernelInterface; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { std::shared_ptr DpicoPassThroughInferCreater() { std::shared_ptr infer = std::make_shared(); @@ -126,4 +126,4 @@ Status DpicoPassThroughInterface::Infer(std::vector *inputs } REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, PassThrough, DpicoPassThroughInferCreater) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_passthrough_infer.h 
b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_passthrough_infer.h index 99af6f14..af3da924 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_passthrough_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_passthrough_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoPassThroughInterface : public KernelInterface { public: @@ -32,5 +32,5 @@ class DpicoPassThroughInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_INFER_DPICO_PASSTHROUGH_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_psroi_pool_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_psroi_pool_infer.cc index 220c3a56..4c0a0021 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_psroi_pool_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_psroi_pool_infer.cc @@ -30,7 +30,7 @@ using mindspore::kernel::KernelInterface; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { std::shared_ptr DpicoPsRoiPoolInferCreater() { std::shared_ptr infer = std::make_shared(); @@ -114,4 +114,4 @@ Status DpicoPsRoiPoolInterface::Infer(std::vector *inputs, } REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, PsRoiPool, DpicoPsRoiPoolInferCreater) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_psroi_pool_infer.h b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_psroi_pool_infer.h index 1468d25e..e8ee1434 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_psroi_pool_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_psroi_pool_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoPsRoiPoolInterface : public KernelInterface { public: @@ -32,5 +32,5 @@ class DpicoPsRoiPoolInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_INFER_DPICO_PSROI_POOL_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_recurrent_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_recurrent_infer.cc index bfcac01e..adac60c8 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_recurrent_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_recurrent_infer.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { namespace { constexpr size_t kGateNum2 = 2; @@ -83,4 +83,4 @@ REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, Rnn, DpicoRecurrentInferCreater) REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, Gru, DpicoRecurrentInferCreater) REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, BiLstm, DpicoRecurrentInferCreater) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_recurrent_infer.h 
b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_recurrent_infer.h index 81572c0d..564cd68e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_recurrent_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_recurrent_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoRecurrentInterface : public KernelInterface { public: @@ -32,6 +32,6 @@ class DpicoRecurrentInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_INFER_DPICO_RECURRENT_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_roi_align_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_roi_align_infer.cc index 74be958a..a1b635ef 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_roi_align_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_roi_align_infer.cc @@ -31,7 +31,7 @@ using mindspore::kernel::KernelInterface; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { std::shared_ptr DpicoRoiAlignInferCreater() { std::shared_ptr infer = std::make_shared(); @@ -125,4 +125,4 @@ Status DpicoRoiAlignInterface::Infer(std::vector *inputs, } REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, RoiAlign, DpicoRoiAlignInferCreater) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_roi_align_infer.h b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_roi_align_infer.h index 0f161423..3bb69e2c 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_roi_align_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_roi_align_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoRoiAlignInterface : public KernelInterface { public: @@ -32,5 +32,5 @@ class DpicoRoiAlignInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_INFER_DPICO_ROI_ALIGN_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_spp_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_spp_infer.cc index 2e8ae690..d2663629 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_spp_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_spp_infer.cc @@ -30,7 +30,7 @@ using mindspore::kernel::KernelInterface; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { namespace { constexpr auto kSquareNum = 2; @@ -107,4 +107,4 @@ Status DpicoSppInterface::Infer(std::vector *inputs, std::v } REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, Spp, DpicoSppInferCreater) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_spp_infer.h b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_spp_infer.h index b378235a..c4b2f4f8 100644 --- 
a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_spp_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_spp_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoSppInterface : public KernelInterface { public: @@ -32,5 +32,5 @@ class DpicoSppInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_INFER_DPICO_UPSAMPLE_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_upsample_infer.cc b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_upsample_infer.cc index 96579735..3ac22a94 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_upsample_infer.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_upsample_infer.cc @@ -31,7 +31,7 @@ using mindspore::kernel::KernelInterface; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace kernel { std::shared_ptr DpicoUpsampleInferCreater() { std::shared_ptr infer = std::make_shared(); @@ -128,4 +128,4 @@ Status DpicoUpsampleInterface::Infer(std::vector *inputs, } REGISTER_CUSTOM_KERNEL_INTERFACE(DPICO, Upsample, DpicoUpsampleInferCreater) } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_upsample_infer.h b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_upsample_infer.h index 1d56db6c..3548d25f 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_upsample_infer.h +++ b/mindspore-lite/tools/converter/adapter/dpico/infer/dpico_upsample_infer.h @@ -20,7 +20,7 @@ #include #include "include/kernel_interface.h" -namespace mindspore { +namespace mindspore::lite { namespace kernel { class DpicoUpsampleInterface : public KernelInterface { public: @@ -32,5 +32,5 @@ class DpicoUpsampleInterface : public KernelInterface { const schema::Primitive *primitive, const kernel::Kernel *kernel) override; }; } // namespace kernel -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_INFER_DPICO_UPSAMPLE_INFER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/bi_lstm_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/bi_lstm_mapper.cc index c4ad6330..251349d5 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/bi_lstm_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/bi_lstm_mapper.cc @@ -22,7 +22,7 @@ #include "common/anf_util.h" #include "op/bi_lstm_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS BiLstmMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -61,4 +61,4 @@ STATUS BiLstmMapper::Map(const api::CNodePtr &cnode, std::vector #include "op/nop_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS NopMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -46,4 +46,4 @@ STATUS NopMapper::Map(const api::CNodePtr &cnode, std::vector * } REG_MAPPER(Nop, NopMapper) } // namespace dpico -} // namespace 
mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/nop_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/nop_mapper.h index 4466e38b..9c708f73 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/nop_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/nop_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class NopMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class NopMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_NOP_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/reverse_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/reverse_mapper.cc index 992d4c5c..2b3ce7f8 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/reverse_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/reverse_mapper.cc @@ -22,7 +22,7 @@ #include "infer/reverse_v2.h" #include "op/reverse_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS ReverseMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -58,4 +58,4 @@ STATUS ReverseMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -74,4 +74,4 @@ STATUS RoiAlignMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -64,4 +64,4 @@ STATUS SppMapper::Map(const api::CNodePtr &cnode, std::vector * } REG_MAPPER(Spp, SppMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/spp_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/spp_mapper.h index a2fa02d7..b0bd1afa 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/spp_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/legacy_ops/spp_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class SppMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class SppMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_SPP_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/abs_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/abs_mapper.cc index 7ef54dbc..daaf612d 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/abs_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/abs_mapper.cc @@ -22,7 +22,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" #include "op/absval_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS AbsMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -52,4 +52,4 @@ STATUS AbsMapper::Map(const api::CNodePtr 
&cnode, std::vector * } REG_MAPPER(Abs, AbsMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/abs_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/abs_mapper.h index fff4d4b0..e2ca7af5 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/abs_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/abs_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class AbsMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class AbsMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_ABS_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/acos_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/acos_mapper.cc index 04469952..fbc22fa8 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/acos_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/acos_mapper.cc @@ -23,7 +23,7 @@ #include "op/acos_operator.h" #include "parser/onnx/onnx_acos_parser.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS AcosMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -44,4 +44,4 @@ STATUS AcosMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Acos, AcosMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/acos_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/acos_mapper.h index e831ebe5..e3f646dd 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/acos_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/acos_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class AcosMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class AcosMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_ACOS_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/acosh_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/acosh_mapper.cc index a1d5f91f..90ecda9b 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/acosh_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/acosh_mapper.cc @@ -23,7 +23,7 @@ #include "op/acosh_operator.h" #include "parser/onnx/onnx_acosh_parser.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS AcoshMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -44,4 +44,4 @@ STATUS AcoshMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Acosh, AcoshMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/acosh_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/acosh_mapper.h index 6c37f141..a1f540a2 100644 
--- a/mindspore-lite/tools/converter/adapter/dpico/mapper/acosh_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/acosh_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class AcoshMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class AcoshMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_ACOSH_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/activation_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/activation_mapper.cc index 60eaa663..8678444f 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/activation_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/activation_mapper.cc @@ -28,7 +28,7 @@ #include "op/clip_operator.h" #include "op/elu_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr float kNum6 = 6.0; @@ -169,4 +169,4 @@ STATUS ActivationMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -67,4 +67,4 @@ STATUS ArgMaxMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -44,4 +44,4 @@ STATUS AsinhMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Asinh, AsinhMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/asinh_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/asinh_mapper.h index 20f9282a..e328cd83 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/asinh_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/asinh_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class AsinhMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class AsinhMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_ASINH_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/atanh_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/atanh_mapper.cc index 7e75c390..1608bf66 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/atanh_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/atanh_mapper.cc @@ -23,7 +23,7 @@ #include "op/atanh_operator.h" #include "parser/onnx/onnx_atanh_parser.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS AtanhMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -44,4 +44,4 @@ STATUS AtanhMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Atanh, AtanhMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/atanh_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/atanh_mapper.h index 07b28573..e6fcfa3a 100644 --- 
a/mindspore-lite/tools/converter/adapter/dpico/mapper/atanh_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/atanh_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class AtanhMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class AtanhMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_ATANH_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/batch_norm_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/batch_norm_mapper.cc index 255ab517..5ddccfb9 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/batch_norm_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/batch_norm_mapper.cc @@ -24,7 +24,7 @@ #include "common/op_enum.h" #include "op/batch_norm_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { // BatchNorm: {BNMeanIndex:2, BNVarIndex:3, ScaleFactorIndex:4} @@ -161,4 +161,4 @@ STATUS BatchNormMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Bias, BiasMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/bias_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/bias_mapper.h index 5dc5b65e..e040f711 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/bias_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/bias_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class BiasMapper : public OpMapper { public: @@ -32,5 +32,5 @@ class BiasMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_BIAS_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/bitshift_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/bitshift_mapper.cc index b4e8bdfe..c6aeddd4 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/bitshift_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/bitshift_mapper.cc @@ -24,7 +24,7 @@ #include "op/bit_shift_operator.h" #include "parser/onnx/onnx_bitshift_parser.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS BitShiftMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -55,4 +55,4 @@ STATUS BitShiftMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -47,4 +47,4 @@ STATUS BnllMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Bnll, BnllMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/bnll_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/bnll_mapper.h index d8f5846c..ad993997 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/bnll_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/bnll_mapper.h @@ -22,7 +22,7 @@ 
#include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class BnllMapper : public OpMapper { public: @@ -32,5 +32,5 @@ class BnllMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_BNLL_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/cast_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/cast_mapper.cc index 749da2e2..0d3eb6ec 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/cast_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/cast_mapper.cc @@ -21,7 +21,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" #include "op/cast_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS CastMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -46,4 +46,4 @@ STATUS CastMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Cast, CastMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/cast_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/cast_mapper.h index 570835a2..09f79804 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/cast_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/cast_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class CastMapper : public OpMapper { public: @@ -32,5 +32,5 @@ class CastMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_CAST_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/clip_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/clip_mapper.cc index 940270a6..3f9010cb 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/clip_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/clip_mapper.cc @@ -21,7 +21,7 @@ #include "infer/clip.h" #include "op/clip_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS ClipMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -58,4 +58,4 @@ STATUS ClipMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Clip, ClipMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/clip_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/clip_mapper.h index c5165e6a..bbefdf27 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/clip_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/clip_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ClipMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class ClipMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico 
-} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_CLIP_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/concat_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/concat_mapper.cc index a0b45a17..0f78f85f 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/concat_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/concat_mapper.cc @@ -21,7 +21,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" #include "op/concat_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS ConcatMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -53,4 +53,4 @@ STATUS ConcatMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Conv2DFusion, ConvMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/conv_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/conv_mapper.h index c08beace..c37efa61 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/conv_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/conv_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ConvMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class ConvMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_CONV_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/cosh_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/cosh_mapper.cc index b81e4085..10034edd 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/cosh_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/cosh_mapper.cc @@ -23,7 +23,7 @@ #include "op/cosh_operator.h" #include "parser/onnx/onnx_cosh_parser.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS CoshMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -44,4 +44,4 @@ STATUS CoshMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Cosh, CoshMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/cosh_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/cosh_mapper.h index 6831feb9..bce82f76 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/cosh_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/cosh_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class CoshMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class CoshMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_COSH_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/crop_mapper.cc 
b/mindspore-lite/tools/converter/adapter/dpico/mapper/crop_mapper.cc index dd307708..f1ce0e70 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/crop_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/crop_mapper.cc @@ -22,7 +22,7 @@ #include "infer/crop.h" #include "op/crop_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS CropMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -61,4 +61,4 @@ STATUS CropMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Crop, CropMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/crop_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/crop_mapper.h index b43689e3..36a19927 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/crop_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/crop_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class CropMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class CropMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_CROP_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/custom_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/custom_mapper.cc index 997975b3..3a895636 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/custom_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/custom_mapper.cc @@ -24,7 +24,7 @@ #include "op/custom_operator.h" #include "infer/custom.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { custom::ExtendedParam GetParamFromAttrs(const api::SharedPtr &custom_prim, int index) { @@ -110,4 +110,4 @@ STATUS CustomMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -78,4 +78,4 @@ STATUS DecBBoxMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -78,4 +78,4 @@ STATUS DetectionOutputMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -73,4 +73,4 @@ STATUS EltwiseMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -64,4 +64,4 @@ STATUS ExpMapper::Map(const api::CNodePtr &cnode, std::vector * } REG_MAPPER(ExpFusion, ExpMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/exp_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/exp_mapper.h index c190c8df..fd47a1a0 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/exp_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/exp_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ExpMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class 
ExpMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_EXP_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/extract_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/extract_mapper.cc index 953005ce..f51e45b6 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/extract_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/extract_mapper.cc @@ -22,7 +22,7 @@ #include "common/op_attr.h" #include "op/extract_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS ExtractMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -58,4 +58,4 @@ STATUS ExtractMapper::Map(const api::CNodePtr &cnode, std::vector *b } REG_MAPPER(FullConnection, FCMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/fc_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/fc_mapper.h index 5a028e02..fd34e1d7 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/fc_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/fc_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class FCMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class FCMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_FC_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/flatten_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/flatten_mapper.cc index 571b77a0..2ec42f05 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/flatten_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/flatten_mapper.cc @@ -23,7 +23,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" #include "op/flatten_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS FlattenMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -59,4 +59,4 @@ STATUS FlattenMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -48,4 +48,4 @@ STATUS GatherElementsMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -52,4 +52,4 @@ STATUS GruMapper::Map(const api::CNodePtr &cnode, std::vector * } REG_MAPPER(Gru, GruMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/gru_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/gru_mapper.h index 09d5a64f..1b0b2309 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/gru_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/gru_mapper.h @@ -21,7 +21,7 @@ #include #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { 
namespace dpico { class GruMapper : public OpMapper { public: @@ -31,6 +31,6 @@ class GruMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_GRU_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/hardmax_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/hardmax_mapper.cc index eb8a2b9f..5a145490 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/hardmax_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/hardmax_mapper.cc @@ -24,7 +24,7 @@ #include "op/hardmax_operator.h" #include "parser/onnx/onnx_hardmax_parser.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS HardmaxMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -65,4 +65,4 @@ STATUS HardmaxMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -49,4 +49,4 @@ STATUS HardSigmoidMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -70,4 +70,4 @@ STATUS InterpMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -61,4 +61,4 @@ STATUS LogMapper::Map(const api::CNodePtr &cnode, std::vector * } REG_MAPPER(Log, LogMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/log_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/log_mapper.h index 562788b6..bc60cbd5 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/log_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/log_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class LogMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class LogMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_LOG_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/lrn_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/lrn_mapper.cc index 9f71a566..19ce1a13 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/lrn_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/lrn_mapper.cc @@ -23,7 +23,7 @@ #include "infer/lrn.h" #include "op/lrn_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS LrnMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -64,4 +64,4 @@ STATUS LrnMapper::Map(const api::CNodePtr &cnode, std::vector * } REG_MAPPER(LRN, LrnMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/lrn_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/lrn_mapper.h index 15dcf4b7..0e27731d 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/lrn_mapper.h +++ 
b/mindspore-lite/tools/converter/adapter/dpico/mapper/lrn_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class LrnMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class LrnMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_LRN_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/lstm_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/lstm_mapper.cc index 31fd46d1..d0cd7a38 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/lstm_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/lstm_mapper.cc @@ -22,7 +22,7 @@ #include "common/anf_util.h" #include "op/lstm_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { constexpr int kNums1 = 1; constexpr int kNums2 = 2; @@ -96,4 +96,4 @@ STATUS LstmMapper::Map(const api::CNodePtr &cnode, std::vector REG_MAPPER(Lstm, LstmMapper) REG_MAPPER(LSTM, LstmMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/lstm_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/lstm_mapper.h index 77f784ec..2a873ad8 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/lstm_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/lstm_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class LstmMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class LstmMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_LSTM_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/mat_mul_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/mat_mul_mapper.cc index 63053488..3849e822 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/mat_mul_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/mat_mul_mapper.cc @@ -23,7 +23,7 @@ #include "common/anf_util.h" #include "op/matrix_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { STATUS DoMaxtixOperatorMap(const api::CNodePtr &cnode, std::vector *base_operators, @@ -94,4 +94,4 @@ STATUS MatMulMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -69,4 +69,4 @@ STATUS MaxUnpoolMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -48,4 +48,4 @@ STATUS MishMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Mish, MishMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/mish_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/mish_mapper.h index f41e7950..4f78420e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/mish_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/mish_mapper.h @@ -22,7 +22,7 @@ #include 
"mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class MishMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class MishMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_MISH_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/mod_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/mod_mapper.cc index 1be94b9e..ebdeecb4 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/mod_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/mod_mapper.cc @@ -23,7 +23,7 @@ #include "op/mod_operator.h" #include "parser/onnx/onnx_mod_parser.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS ModMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -50,4 +50,4 @@ STATUS ModMapper::Map(const api::CNodePtr &cnode, std::vector * } REG_MAPPER(Mod, ModMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/mod_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/mod_mapper.h index 83ff47b7..7dc99b73 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/mod_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/mod_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ModMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class ModMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_MOD_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/mvn_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/mvn_mapper.cc index 832213ac..81340b9f 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/mvn_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/mvn_mapper.cc @@ -22,7 +22,7 @@ #include "common/op_attr.h" #include "op/mvn_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS MvnMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -67,4 +67,4 @@ STATUS MvnMapper::Map(const api::CNodePtr &cnode, std::vector * } REG_MAPPER(Mvn, MvnMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/mvn_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/mvn_mapper.h index 9d4d8b3d..256af2d4 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/mvn_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/mvn_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class MvnMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class MvnMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace 
mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_MVN_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/normalize_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/normalize_mapper.cc index 9e0dbfc7..09d34374 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/normalize_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/normalize_mapper.cc @@ -22,7 +22,7 @@ #include "common/op_attr.h" #include "op/normalize_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { STATUS SetNormalizeDataInfo(const api::CNodePtr &cnode, mapper::NormalizeOperator *normalize_operator) { @@ -102,4 +102,4 @@ STATUS NormalizeMapper::Map(const api::CNodePtr &cnode, std::vector; class OpMapper { @@ -61,6 +61,6 @@ STATUS SetOnnxLstmOffLineArgs(mapper::RecurrentOperator *recurrent_operator, siz const vector &shape_vec, const float *data); STATUS PushOfflineArgs(const api::CNodePtr &cnode, mapper::BaseOperator *base_operator, size_t offline_args_size); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_OP_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/op_mapper_registry.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/op_mapper_registry.cc index 51df42ca..ff9bc94d 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/op_mapper_registry.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/op_mapper_registry.cc @@ -17,7 +17,7 @@ #include "mapper/op_mapper_registry.h" #include -namespace mindspore { +namespace mindspore::lite { namespace dpico { OpMapperRegistry::OpMapperRegistry() = default; @@ -34,4 +34,4 @@ OpMapperPtr OpMapperRegistry::GetOpMapper(const std::string &name) { return nullptr; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/op_mapper_registry.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/op_mapper_registry.h index c6eead7b..ffa09881 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/op_mapper_registry.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/op_mapper_registry.h @@ -23,7 +23,7 @@ #include #include "mapper/op_mapper.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class OpMapperRegistry { public: @@ -49,5 +49,5 @@ class OpMapperRegistrar { #define REG_MAPPER(primitive_type, mapper) \ static OpMapperRegistrar g_##primitive_type##MapperReg(#primitive_type, std::make_shared()); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_OP_MAPPER_REGISTRY_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/pad_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/pad_mapper.cc index b4ddee49..0e67354c 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/pad_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/pad_mapper.cc @@ -23,7 +23,7 @@ #include "op/pad_operator.h" #include "infer/cxx_api/pad_fusion.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { STATUS SetPadDataInfo(const api::CNodePtr &cnode, mapper::PadOperator *pad_operator) { @@ -96,4 +96,4 @@ STATUS PadMapper::Map(const api::CNodePtr &cnode, std::vector * } REG_MAPPER(PadFusion, PadMapper) } // namespace dpico -} // namespace mindspore +} // 
namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/pad_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/pad_mapper.h index 863cc3a4..58efd41b 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/pad_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/pad_mapper.h @@ -24,7 +24,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class PadMapper : public OpMapper { public: @@ -34,6 +34,6 @@ class PadMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_PAD_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/passthrough_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/passthrough_mapper.cc index 19872272..7235d65e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/passthrough_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/passthrough_mapper.cc @@ -22,7 +22,7 @@ #include "common/anf_util.h" #include "op/passthrough_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS PassThroughMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -61,4 +61,4 @@ STATUS PassThroughMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -95,4 +95,4 @@ STATUS PermuteMapper::Map(const api::CNodePtr &cnode, std::vector REG_MAPPER(AvgPoolFusion, PoolMapper) REG_MAPPER(MaxPoolFusion, PoolMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/pool_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/pool_mapper.h index f40203c4..162f5de7 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/pool_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/pool_mapper.h @@ -24,7 +24,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class PoolMapper : public OpMapper { public: @@ -34,6 +34,6 @@ class PoolMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_POOL_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/power_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/power_mapper.cc index 67823ad5..235b0796 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/power_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/power_mapper.cc @@ -24,7 +24,7 @@ #include "infer/cxx_api/pow_fusion.h" #include "op/power_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS PowerMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -78,4 +78,4 @@ STATUS PowerMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(PowFusion, PowerMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/tools/converter/adapter/dpico/mapper/power_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/power_mapper.h index 7fec797f..c12f6673 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/power_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/power_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class PowerMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class PowerMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_POWER_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/prelu_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/prelu_mapper.cc index 3fdf6e09..ee8b2d06 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/prelu_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/prelu_mapper.cc @@ -24,7 +24,7 @@ #include "infer/cxx_api/prelu_fusion.h" #include "op/prelu_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { STATUS SetPReluDataInfo(const api::CNodePtr &cnode, const api::PrimitivePtr &prim, @@ -87,4 +87,4 @@ STATUS PReluMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(PReLUFusion, PReluMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/prelu_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/prelu_mapper.h index 2d4dfcd2..fcc76968 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/prelu_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/prelu_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class PReluMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class PReluMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_PRELU_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/psroi_pool_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/psroi_pool_mapper.cc index 759e6341..156d82a1 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/psroi_pool_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/psroi_pool_mapper.cc @@ -22,7 +22,7 @@ #include "common/anf_util.h" #include "op/ps_roi_pool_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS PsRoiPoolMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -58,4 +58,4 @@ STATUS PsRoiPoolMapper::Map(const api::CNodePtr &cnode, std::vector &reduction_prim, @@ -133,4 +133,4 @@ STATUS ReductionMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -98,4 +98,4 @@ STATUS ReshapeMapper::Map(const api::CNodePtr &cnode, std::vector kCoordinateModeMap = { @@ -212,4 +212,4 @@ STATUS ResizeMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, 
const api::CNodePtrList &output_cnodes) { @@ -57,4 +57,4 @@ STATUS RnnMapper::Map(const api::CNodePtr &cnode, std::vector * } REG_MAPPER(Rnn, RnnMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/rnn_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/rnn_mapper.h index a2da0522..5be9ab75 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/rnn_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/rnn_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class RnnMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class RnnMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_RNN_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/roi_pool_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/roi_pool_mapper.cc index 120ff8bf..f4a56370 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/roi_pool_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/roi_pool_mapper.cc @@ -21,7 +21,7 @@ #include "infer/roi_pooling.h" #include "op/roi_pool_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS RoiPoolMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -59,4 +59,4 @@ STATUS RoiPoolMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(ScaleFusion, ScaleMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/scale_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/scale_mapper.h index e6e75266..4472f599 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/scale_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/scale_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ScaleMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class ScaleMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_SCALE_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/shape_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/shape_mapper.cc index 8c7d5b28..fff56515 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/shape_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/shape_mapper.cc @@ -22,7 +22,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" #include "op/shape_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS ShapeMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -47,4 +47,4 @@ STATUS ShapeMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Shape, ShapeMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/tools/converter/adapter/dpico/mapper/shape_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/shape_mapper.h index 25fdf9d8..7df3bb9b 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/shape_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/shape_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class ShapeMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class ShapeMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_SHAPE_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/shrink_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/shrink_mapper.cc index 3e1c64ac..74ab07d1 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/shrink_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/shrink_mapper.cc @@ -23,7 +23,7 @@ #include "op/shrink_operator.h" #include "parser/onnx/onnx_shrink_parser.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS ShrinkMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -50,4 +50,4 @@ STATUS ShrinkMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -52,4 +52,4 @@ STATUS ShuffleChannelMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -44,4 +44,4 @@ STATUS SinhMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Sinh, SinhMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/sinh_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/sinh_mapper.h index a51a08ea..ae58bcc5 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/sinh_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/sinh_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class SinhMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class SinhMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_SINH_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/slice_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/slice_mapper.cc index 73def632..8585dc4d 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/slice_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/slice_mapper.cc @@ -26,7 +26,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" #include "op/slice_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS SliceMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -113,4 +113,4 @@ STATUS SliceMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Split, SliceMapper) 
} // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/slice_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/slice_mapper.h index 75777440..31a840b6 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/slice_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/slice_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class SliceMapper : public OpMapper { public: @@ -32,5 +32,5 @@ class SliceMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_SLICE_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/softmax_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/softmax_mapper.cc index 13b6cebc..cd0970a3 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/softmax_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/softmax_mapper.cc @@ -23,7 +23,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" #include "op/softmax_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS SoftmaxMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -62,4 +62,4 @@ STATUS SoftmaxMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -47,4 +47,4 @@ STATUS SoftSignMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -47,4 +47,4 @@ STATUS SqrtMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(Sqrt, SqrtMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/sqrt_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/sqrt_mapper.h index 56907526..09268824 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/sqrt_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/sqrt_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class SqrtMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class SqrtMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_SQRT_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/squeeze_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/squeeze_mapper.cc index c2ff3945..71e24bf6 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/squeeze_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/squeeze_mapper.cc @@ -22,7 +22,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" #include "op/squeeze_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS SqueezeMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList 
&output_cnodes) { @@ -62,4 +62,4 @@ STATUS SqueezeMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -52,4 +52,4 @@ STATUS ThresholdMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -98,4 +98,4 @@ STATUS TileMapper::Map(const api::CNodePtr &cnode, std::vector } REG_MAPPER(TileFusion, TileMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/tile_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/tile_mapper.h index 6893726b..539bef5e 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/tile_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/tile_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class TileMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class TileMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_TILE_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/unsqueeze_mapper.cc b/mindspore-lite/tools/converter/adapter/dpico/mapper/unsqueeze_mapper.cc index 8524880a..6b8bc45d 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/unsqueeze_mapper.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/unsqueeze_mapper.cc @@ -23,7 +23,7 @@ #include "infer/unsqueeze.h" #include "op/unsqueeze_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { STATUS UnsqueezeMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -66,4 +66,4 @@ STATUS UnsqueezeMapper::Map(const api::CNodePtr &cnode, std::vector *base_operators, const api::PrimitivePtr &prim, const api::CNodePtrList &output_cnodes) { @@ -63,4 +63,4 @@ STATUS UpsampleMapper::Map(const api::CNodePtr &cnode, std::vector * } REG_MAPPER(Xor, XorMapper) } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/mapper/xor_mapper.h b/mindspore-lite/tools/converter/adapter/dpico/mapper/xor_mapper.h index 52e7f522..043ea8ee 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/mapper/xor_mapper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/mapper/xor_mapper.h @@ -22,7 +22,7 @@ #include "mapper/op_mapper.h" #include "mapper/op_mapper_registry.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class XorMapper : public OpMapper { public: @@ -32,6 +32,6 @@ class XorMapper : public OpMapper { const api::CNodePtrList &output_cnodes) override; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_MAPPER_XOR_MAPPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/parser/detection_output_param_helper.cc b/mindspore-lite/tools/converter/adapter/dpico/parser/detection_output_param_helper.cc index 702add48..b9059372 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/parser/detection_output_param_helper.cc +++ 
b/mindspore-lite/tools/converter/adapter/dpico/parser/detection_output_param_helper.cc @@ -26,7 +26,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { int GetProposalParamType(mapper::ProposalParamType *proposal_param_type, @@ -261,4 +261,4 @@ int GetDetectionOutputParamFromAttrs(std::vector * return RET_OK; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/parser/detection_output_param_helper.h b/mindspore-lite/tools/converter/adapter/dpico/parser/detection_output_param_helper.h index 694e65fa..4c8caf28 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/parser/detection_output_param_helper.h +++ b/mindspore-lite/tools/converter/adapter/dpico/parser/detection_output_param_helper.h @@ -25,12 +25,12 @@ #include "infer/custom.h" #include "./pico_caffe.pb.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { int SetAttrsByDetectionOutputParam(const std::shared_ptr &custom_prim, const caffe::LayerParameter &proto); int SetAttrsByDecBboxParam(const std::shared_ptr &custom_prim, const caffe::LayerParameter &proto); int GetDetectionOutputParamFromAttrs(std::vector *detection_params, const api::SharedPtr &custom_prim); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_PARSER_DETECTION_OUTPUT_PARAM_HELPER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/calib_data_generator.cc b/mindspore-lite/tools/converter/adapter/dpico/src/calib_data_generator.cc index 5a1f2270..618cfd7c 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/calib_data_generator.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/src/calib_data_generator.cc @@ -31,7 +31,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr size_t kMaxSize = 1024; @@ -309,4 +309,4 @@ int CalibDataGenerator::Run(const api::AnfNodePtrList &graph_inputs, const api:: return RET_OK; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/calib_data_generator.h b/mindspore-lite/tools/converter/adapter/dpico/src/calib_data_generator.h index a1ae9358..e02b8505 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/calib_data_generator.h +++ b/mindspore-lite/tools/converter/adapter/dpico/src/calib_data_generator.h @@ -32,7 +32,7 @@ #include "include/errorcode.h" #include "common/check_base.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { constexpr auto kNchw2Nhwc = "nchw2nhwc"; constexpr auto kNhwc2Nchw = "nhwc2nchw"; @@ -157,6 +157,6 @@ class CalibDataGenerator { std::map> control_flow_inputs_; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_SRC_CALIB_DATA_GENERATOR_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/custom_creator.cc b/mindspore-lite/tools/converter/adapter/dpico/src/custom_creator.cc index 35224fd7..b0a72f27 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/custom_creator.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/src/custom_creator.cc @@ -36,7 +36,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { 
namespace dpico { namespace { const int kMaxRoiFrameCnt = 300; @@ -463,4 +463,4 @@ STATUS CustomOpCreator::SetCustomMultiOutput(const api::FuncGraphPtr &func_graph return RET_OK; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/custom_creator.h b/mindspore-lite/tools/converter/adapter/dpico/src/custom_creator.h index 2978844e..424af4b0 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/custom_creator.h +++ b/mindspore-lite/tools/converter/adapter/dpico/src/custom_creator.h @@ -27,7 +27,7 @@ #include "include/errorcode.h" using mindspore::lite::STATUS; -namespace mindspore { +namespace mindspore::lite { namespace dpico { using ModelCoreInfoPtr = std::shared_ptr; class CustomOpCreator { @@ -63,5 +63,5 @@ class CustomOpCreator { bool has_unsupported_; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_SRC_CUSTOM_CREATOR_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/data_preprocessor.cc b/mindspore-lite/tools/converter/adapter/dpico/src/data_preprocessor.cc index 3b6a4cee..63c035b7 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/data_preprocessor.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/src/data_preprocessor.cc @@ -30,7 +30,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { const std::unordered_set kRgbInputFormats = {"BGR_PLANAR", "RGB_PLANAR", "RGB_PACKAGE", "BGR_PACKAGE"}; @@ -355,4 +355,4 @@ int DataPreprocessor::Run(const api::AnfNodePtrList &inputs) { return RET_OK; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/data_preprocessor.h b/mindspore-lite/tools/converter/adapter/dpico/src/data_preprocessor.h index 159c1586..6043b64a 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/data_preprocessor.h +++ b/mindspore-lite/tools/converter/adapter/dpico/src/data_preprocessor.h @@ -34,7 +34,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace dpico { class DataPreprocessor { public: @@ -134,6 +134,6 @@ class DataPreprocessor { size_t batch_size_{0}; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_SRC_DATA_PREPROCESSOR_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/dpico_pass.cc b/mindspore-lite/tools/converter/adapter/dpico/src/dpico_pass.cc index a36f1025..507a0fbf 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/dpico_pass.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/src/dpico_pass.cc @@ -40,7 +40,7 @@ #include "src/custom_creator.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { const size_t kMinimumNumbOfSegments = 1; @@ -555,10 +555,10 @@ bool DpicoPass::Execute(const api::FuncGraphPtr &func_graph) { } REG_PASS(DpicoPass, dpico::DpicoPass) } // namespace dpico -} // namespace mindspore -namespace mindspore::registry { +} // namespace mindspore::lite +namespace mindspore::lite::registry { const std::vector schedule_pipe = {"ConstFoldPass", "ToNCHWFormat", "DpicoPreprocessPass", "DecreaseTransposeAlgo", "DumpGraph", "DpicoPass", "ToNHWCFormat"}; 
REG_SCHEDULED_PASS(POSITION_BEGIN, schedule_pipe) -} // namespace mindspore::registry +} // namespace mindspore::lite::registry diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/dpico_pass.h b/mindspore-lite/tools/converter/adapter/dpico/src/dpico_pass.h index cb262e70..b6703717 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/dpico_pass.h +++ b/mindspore-lite/tools/converter/adapter/dpico/src/dpico_pass.h @@ -30,7 +30,7 @@ #include "src/graph_split_api.h" using mindspore::lite::STATUS; -namespace mindspore { +namespace mindspore::lite { namespace dpico { class DpicoPass : public registry::PassBase { public: @@ -57,6 +57,6 @@ class DpicoPass : public registry::PassBase { std::string dpico_config_path_{}; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_SRC_DPICO_PASS_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/dpico_preprocess_pass.cc b/mindspore-lite/tools/converter/adapter/dpico/src/dpico_preprocess_pass.cc index 45537e58..95d90690 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/dpico_preprocess_pass.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/src/dpico_preprocess_pass.cc @@ -26,7 +26,7 @@ #include "common/op_attr.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { STATUS InsertTransposeBeforeBiasAdd(const api::FuncGraphPtr &func_graph, const api::CNodePtr &cnode, @@ -216,7 +216,7 @@ bool DpicoPreprocessPass::Execute(const api::FuncGraphPtr &func_graph) { return true; } } // namespace dpico -} // namespace mindspore -namespace mindspore::registry { +} // namespace mindspore::lite +namespace mindspore::lite::registry { REG_PASS(DpicoPreprocessPass, dpico::DpicoPreprocessPass) } diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/dpico_preprocess_pass.h b/mindspore-lite/tools/converter/adapter/dpico/src/dpico_preprocess_pass.h index 20751359..89d50d75 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/dpico_preprocess_pass.h +++ b/mindspore-lite/tools/converter/adapter/dpico/src/dpico_preprocess_pass.h @@ -28,7 +28,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::STATUS; -namespace mindspore { +namespace mindspore::lite { namespace dpico { class DpicoPreprocessPass : public registry::PassBase { public: @@ -41,6 +41,6 @@ class DpicoPreprocessPass : public registry::PassBase { STATUS PreProcessBiadAdd(const api::FuncGraphPtr &func_graph, const api::CNodePtr &cnode); }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_SRC_DPICO_PREPROCESS_PASS_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/graph_split_api.cc b/mindspore-lite/tools/converter/adapter/dpico/src/graph_split_api.cc index ad99c5ea..c70e9b28 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/graph_split_api.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/src/graph_split_api.cc @@ -34,7 +34,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr auto kFormat = "format"; @@ -392,4 +392,4 @@ int FillSubgraphOutputsFormat(Subgraph *subgraph, const api::FuncGraphPtr &func_ return RET_OK; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/tools/converter/adapter/dpico/src/graph_split_api.h b/mindspore-lite/tools/converter/adapter/dpico/src/graph_split_api.h index 3937aecf..59046f0b 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/graph_split_api.h +++ b/mindspore-lite/tools/converter/adapter/dpico/src/graph_split_api.h @@ -22,7 +22,7 @@ #include #include "mindapi/ir/common.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { struct Subgraph; struct GraphSplitInfo; @@ -32,5 +32,5 @@ api::AnfNodePtrList GetSubgraphInputs(const Subgraph &subgraph, const api::FuncG api::AnfNodePtrList GetSubgraphOutputs(const Subgraph &subgraph, const api::FuncGraphManagerPtr &manager); int FillSubgraphOutputsFormat(Subgraph *subgraph, const api::FuncGraphPtr &func_graph); } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_SRC_GRAPH_SPLITT_API_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/graph_split_info.h b/mindspore-lite/tools/converter/adapter/dpico/src/graph_split_info.h index 0255a6a6..596dc121 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/graph_split_info.h +++ b/mindspore-lite/tools/converter/adapter/dpico/src/graph_split_info.h @@ -23,7 +23,7 @@ #include "mindapi/ir/common.h" using ShapeVector = std::vector; -namespace mindspore { +namespace mindspore::lite { namespace dpico { enum OmNetType : int { kCnn = 0, kRoi = 1, kRecurrent = 2 }; @@ -46,5 +46,5 @@ struct GraphSplitInfo { std::map> subgraphs_map; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_SRC_GRAPH_SPLIT_INFO_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/mapper_config_parser.cc b/mindspore-lite/tools/converter/adapter/dpico/src/mapper_config_parser.cc index 88032dea..62f4acb6 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/mapper_config_parser.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/src/mapper_config_parser.cc @@ -23,7 +23,7 @@ #include "common/file_util.h" #include "mindapi/base/logging.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { constexpr size_t kPairSize = 2; @@ -374,4 +374,4 @@ void MapperConfigParser::SetOriginConfigFilePath(const std::string &origin_confi origin_config_file_path_ = origin_config_file_path; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/mapper_config_parser.h b/mindspore-lite/tools/converter/adapter/dpico/src/mapper_config_parser.h index ca8f4f8a..63c53d03 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/mapper_config_parser.h +++ b/mindspore-lite/tools/converter/adapter/dpico/src/mapper_config_parser.h @@ -24,7 +24,7 @@ #include #include "ir/dtype/type_id.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { constexpr auto kInsertOpConf = "[insert_op_conf]"; constexpr auto kInstructionName = "[instruction_name]"; @@ -88,5 +88,5 @@ class MapperConfigParser { std::string internal_stride_ = "16"; }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_SRC_MAPPER_CONFIG_PARSER_H_ diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/om_generator.cc b/mindspore-lite/tools/converter/adapter/dpico/src/om_generator.cc index b2fee070..096925c5 100644 --- 
a/mindspore-lite/tools/converter/adapter/dpico/src/om_generator.cc +++ b/mindspore-lite/tools/converter/adapter/dpico/src/om_generator.cc @@ -33,7 +33,7 @@ #include "src/mapper_config_parser.h" #include "src/graph_split_api.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { namespace { const std::unordered_map kMapperSupportedTypes = { @@ -388,4 +388,4 @@ int OmGenerator::Run(const api::FuncGraphPtr &func_graph, const Subgraph &sub_gr return RET_OK; } } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/adapter/dpico/src/om_generator.h b/mindspore-lite/tools/converter/adapter/dpico/src/om_generator.h index 50d089c5..0b377b69 100644 --- a/mindspore-lite/tools/converter/adapter/dpico/src/om_generator.h +++ b/mindspore-lite/tools/converter/adapter/dpico/src/om_generator.h @@ -27,7 +27,7 @@ #include "mapper/op_mapper.h" #include "op/base_operator.h" -namespace mindspore { +namespace mindspore::lite { namespace dpico { class OmGenerator { public: @@ -45,5 +45,5 @@ class OmGenerator { std::vector *base_operators); }; } // namespace dpico -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_ADAPTER_DPICO_SRC_OM_GENERATOR_H_ diff --git a/mindspore-lite/tools/converter/converter_context.cc b/mindspore-lite/tools/converter/converter_context.cc index b9313985..eab1a76a 100644 --- a/mindspore-lite/tools/converter/converter_context.cc +++ b/mindspore-lite/tools/converter/converter_context.cc @@ -18,7 +18,7 @@ #include #include "include/registry/converter_context.h" -namespace mindspore { +namespace mindspore::lite { namespace converter { constexpr int kMaxInput = 255; @@ -62,4 +62,4 @@ std::map, std::vector> ConverterContext::GetConfigInfo(c return MapStringToVectorChar(external_used_config_infos.at(CharToString(section))); } } // namespace converter -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/converter_lite/converter_flags.cc b/mindspore-lite/tools/converter/converter_lite/converter_flags.cc index 945d74d1..3e0868e7 100644 --- a/mindspore-lite/tools/converter/converter_lite/converter_flags.cc +++ b/mindspore-lite/tools/converter/converter_lite/converter_flags.cc @@ -24,7 +24,7 @@ #include "tools/common/string_util.h" -namespace mindspore::converter { +namespace mindspore::lite::converter { using mindspore::lite::RET_INPUT_PARAM_INVALID; using mindspore::lite::RET_OK; @@ -423,4 +423,4 @@ int Flags::Init(int argc, const char **argv) { return RET_OK; } -} // namespace mindspore::converter +} // namespace mindspore::lite::converter diff --git a/mindspore-lite/tools/converter/converter_lite/converter_flags.h b/mindspore-lite/tools/converter/converter_lite/converter_flags.h index dc1c2570..67e823c9 100644 --- a/mindspore-lite/tools/converter/converter_lite/converter_flags.h +++ b/mindspore-lite/tools/converter/converter_lite/converter_flags.h @@ -24,7 +24,7 @@ #include "include/registry/converter_context.h" #include "tools/common/flag_parser.h" -namespace mindspore { +namespace mindspore::lite { namespace converter { class Flags : public virtual mindspore::lite::FlagParser { public: @@ -93,6 +93,6 @@ class Flags : public virtual mindspore::lite::FlagParser { bool optimizeTransformer = false; }; } // namespace converter -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_CONVERTER_LITE_CONVERTER_FLAGS_H_ diff --git 
a/mindspore-lite/tools/converter/cxx_api/converter_para.h b/mindspore-lite/tools/converter/cxx_api/converter_para.h index 915b1109..c6c52ca3 100644 --- a/mindspore-lite/tools/converter/cxx_api/converter_para.h +++ b/mindspore-lite/tools/converter/cxx_api/converter_para.h @@ -28,7 +28,7 @@ #include "tools/converter/micro/coder/config.h" #include "src/common/config_infos.h" -namespace mindspore { +namespace mindspore::lite { enum ParallelSplitType { SplitNo = 0, SplitByUserRatio = 1, SplitByUserAttr = 2 }; struct ParallelSplitConfig { @@ -118,5 +118,5 @@ struct ConverterPara { ConfigInfos config_infos; std::vector const_names; }; -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_CXX_API_CONVERTER_PARA_H_ diff --git a/mindspore-lite/tools/converter/import/cast_op_adjust.cc b/mindspore-lite/tools/converter/import/cast_op_adjust.cc index 42cf75f9..65b8252b 100644 --- a/mindspore-lite/tools/converter/import/cast_op_adjust.cc +++ b/mindspore-lite/tools/converter/import/cast_op_adjust.cc @@ -21,7 +21,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { constexpr size_t kCastInputNum = 3; bool GetInOutDataTypeValue(const CNodePtr &cast_cnode, int *output_type_value, int *input_type_value) { @@ -158,4 +158,4 @@ bool CastOpAdjust::Run(const FuncGraphPtr &func_graph, bool strict_mode_flag) { } return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/import/cast_op_adjust.h b/mindspore-lite/tools/converter/import/cast_op_adjust.h index c00fb570..9c977d05 100644 --- a/mindspore-lite/tools/converter/import/cast_op_adjust.h +++ b/mindspore-lite/tools/converter/import/cast_op_adjust.h @@ -20,7 +20,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class CastOpAdjust : public Pass { public: @@ -30,5 +30,5 @@ class CastOpAdjust : public Pass { bool Run(const FuncGraphPtr &graph, bool strict_mode_flag); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_IMPORT_CAST_OP_ADJUST_H_ diff --git a/mindspore-lite/tools/converter/import/convert_extend_ops/convert_extend_ops_pass.cc b/mindspore-lite/tools/converter/import/convert_extend_ops/convert_extend_ops_pass.cc index ab699104..1013097e 100644 --- a/mindspore-lite/tools/converter/import/convert_extend_ops/convert_extend_ops_pass.cc +++ b/mindspore-lite/tools/converter/import/convert_extend_ops/convert_extend_ops_pass.cc @@ -27,7 +27,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_z.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_o.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr auto kNameSumExtPatternName = "SumExtPatternName"; constexpr auto kNameMatMulExtPatternName = "MatMulExtPatternName"; @@ -164,4 +164,4 @@ AnfNodePtr ConvertExtendOpsPass::Process(const std::string &pattern_name, const } return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/import/convert_extend_ops/convert_extend_ops_pass.h b/mindspore-lite/tools/converter/import/convert_extend_ops/convert_extend_ops_pass.h index e7c5fb80..310c3bee 100644 --- 
a/mindspore-lite/tools/converter/import/convert_extend_ops/convert_extend_ops_pass.h +++ b/mindspore-lite/tools/converter/import/convert_extend_ops/convert_extend_ops_pass.h @@ -21,7 +21,7 @@ #include #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { /** * ConvertExtendOpsPass will take effect when some extend operations are found in mindir. @@ -58,5 +58,5 @@ AnfNodePtr ConvertOnesPass(const FuncGraphPtr &func_graph, const mindspore::AnfN AnfNodePtr ConvertZerosPass(const FuncGraphPtr &func_graph, const mindspore::AnfNodePtr &node); AnfNodePtr ConvertMulsPass(const FuncGraphPtr &func_graph, const mindspore::AnfNodePtr &node); } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_IMPORT_CONVERT_EXTEND_OPS_CONVERT_EXTEND_OPS_PASS_H_ diff --git a/mindspore-lite/tools/converter/import/convert_extend_ops/dense.cc b/mindspore-lite/tools/converter/import/convert_extend_ops/dense.cc index 5facc5ec..2ab40a4d 100644 --- a/mindspore-lite/tools/converter/import/convert_extend_ops/dense.cc +++ b/mindspore-lite/tools/converter/import/convert_extend_ops/dense.cc @@ -27,7 +27,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_d.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { ShapeArray CalcDenseReshapeVector(const ShapeVector &input_shape_vec, const ShapeVector &weight_shape_vec) { ShapeVector input_reshape_vec = {-1, input_shape_vec.back()}; @@ -113,4 +113,4 @@ AnfNodePtr ConvertDensePass(const FuncGraphPtr &func_graph, const mindspore::Anf return output; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/import/convert_extend_ops/matmul_ext.cc b/mindspore-lite/tools/converter/import/convert_extend_ops/matmul_ext.cc index 893f5710..8e2ea710 100644 --- a/mindspore-lite/tools/converter/import/convert_extend_ops/matmul_ext.cc +++ b/mindspore-lite/tools/converter/import/convert_extend_ops/matmul_ext.cc @@ -26,7 +26,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_b.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kMatMulRank = 2; @@ -245,4 +245,4 @@ AnfNodePtr ConvertMatMulExtPass(const FuncGraphPtr &func_graph, const mindspore: MS_CHECK_TRUE_MSG(output != nullptr, nullptr, "Can't get Reshape output from MatMulExt."); return output; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/import/convert_extend_ops/max_min.cc b/mindspore-lite/tools/converter/import/convert_extend_ops/max_min.cc index bb8e34a6..00eafc9d 100644 --- a/mindspore-lite/tools/converter/import/convert_extend_ops/max_min.cc +++ b/mindspore-lite/tools/converter/import/convert_extend_ops/max_min.cc @@ -26,7 +26,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { AnfNodePtr ConvertMaxMinPass(const FuncGraphPtr &func_graph, const mindspore::AnfNodePtr &node) { auto max_min_cnode = node->cast(); MS_CHECK_TRUE_RET(max_min_cnode != nullptr, nullptr); @@ -72,4 +72,4 @@ AnfNodePtr ConvertMaxMinPass(const FuncGraphPtr &func_graph, const mindspore::An return reduce_max_min_node; } -} // 
namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/import/convert_extend_ops/muls.cc b/mindspore-lite/tools/converter/import/convert_extend_ops/muls.cc index 64e2bcec..c867263e 100644 --- a/mindspore-lite/tools/converter/import/convert_extend_ops/muls.cc +++ b/mindspore-lite/tools/converter/import/convert_extend_ops/muls.cc @@ -27,7 +27,7 @@ #include "mindspore/ccsrc/include/common/utils/convert_utils.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { TypeId GetMulsPromoteType(TypeId input_type, TypeId other_type) { std::set kSet = {kNumberTypeUInt8, kNumberTypeInt8, kNumberTypeInt16, kNumberTypeInt32, kNumberTypeInt64}; @@ -95,4 +95,4 @@ AnfNodePtr ConvertMulsPass(const FuncGraphPtr &func_graph, const mindspore::AnfN return mul_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/import/convert_extend_ops/ones.cc b/mindspore-lite/tools/converter/import/convert_extend_ops/ones.cc index f813c8f1..041ce7b4 100644 --- a/mindspore-lite/tools/converter/import/convert_extend_ops/ones.cc +++ b/mindspore-lite/tools/converter/import/convert_extend_ops/ones.cc @@ -26,7 +26,7 @@ #include "tools/converter/import/convert_extend_ops/convert_extend_ops_pass.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_o.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { AnfNodePtr OnesGetValueByDtype(const FuncGraphPtr &func_graph, const mindspore::CNodePtr &cnode) { auto dtype = cnode->input(kInputIndexTwo); @@ -69,4 +69,4 @@ AnfNodePtr ConvertOnesPass(const FuncGraphPtr &func_graph, const mindspore::AnfN return reduce_ones_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/import/convert_extend_ops/sum_ext.cc b/mindspore-lite/tools/converter/import/convert_extend_ops/sum_ext.cc index a9c72892..c9311f49 100644 --- a/mindspore-lite/tools/converter/import/convert_extend_ops/sum_ext.cc +++ b/mindspore-lite/tools/converter/import/convert_extend_ops/sum_ext.cc @@ -24,7 +24,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { AnfNodePtr ReduceExtendGetCastInputByDtype(const FuncGraphPtr &func_graph, const mindspore::CNodePtr &cnode) { auto input = cnode->input(kInputIndexOne); @@ -94,4 +94,4 @@ AnfNodePtr ConvertSumExtPass(const FuncGraphPtr &func_graph, const mindspore::An return reduce_sum_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/import/convert_extend_ops/utils.cc b/mindspore-lite/tools/converter/import/convert_extend_ops/utils.cc index 32688adc..3bb2198c 100644 --- a/mindspore-lite/tools/converter/import/convert_extend_ops/utils.cc +++ b/mindspore-lite/tools/converter/import/convert_extend_ops/utils.cc @@ -27,7 +27,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { TypeId GetSingleNodeOutputTypeId(const mindspore::AnfNodePtr &node) { TypePtr type = node->Type(); if (node->isa()) { @@ -110,4 +110,4 @@ AnfNodePtr GetBroadcastToNode(const FuncGraphPtr &func_graph, const mindspore::A return bst_node; } -} // 
namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/import/convert_extend_ops/utils.h b/mindspore-lite/tools/converter/import/convert_extend_ops/utils.h index 0481c3dd..005399f2 100644 --- a/mindspore-lite/tools/converter/import/convert_extend_ops/utils.h +++ b/mindspore-lite/tools/converter/import/convert_extend_ops/utils.h @@ -19,7 +19,7 @@ #include "ir/func_graph.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { TypeId GetSingleNodeOutputTypeId(const mindspore::AnfNodePtr &node); AnfNodePtr GetCastNode(const FuncGraphPtr &func_graph, const mindspore::AnfNodePtr &node, const TypeId &dst_type_id); @@ -75,5 +75,5 @@ ValueNodePtr GetCastedScalar(const T number, const TypeId &dst_type_id) { return value_node; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_IMPORT_CONVERT_EXTEND_OPS_UTILS_H_ diff --git a/mindspore-lite/tools/converter/import/convert_extend_ops/zeros.cc b/mindspore-lite/tools/converter/import/convert_extend_ops/zeros.cc index 2ac2edb5..445c3caf 100644 --- a/mindspore-lite/tools/converter/import/convert_extend_ops/zeros.cc +++ b/mindspore-lite/tools/converter/import/convert_extend_ops/zeros.cc @@ -26,7 +26,7 @@ #include "tools/converter/import/convert_extend_ops/convert_extend_ops_pass.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_z.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { AnfNodePtr ZerosGetValueByDtype(const FuncGraphPtr &func_graph, const mindspore::CNodePtr &cnode) { auto dtype = cnode->input(kInputIndexTwo); @@ -70,4 +70,4 @@ AnfNodePtr ConvertZerosPass(const FuncGraphPtr &func_graph, const mindspore::Anf return reduce_zeros_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.cc b/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.cc index 636e06e6..8424ebd6 100644 --- a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.cc +++ b/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.cc @@ -22,7 +22,7 @@ #include "include/mpi_sys.h" #include "include/mpi_vb.h" -namespace mindspore { +namespace mindspore::lite { namespace nnie { constexpr int kNNIEMaxNameLen = 128; @@ -234,4 +234,4 @@ void NnieClose(NnieHandle *h) { h->load_model_ = 0; } } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.h b/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.h index 39db6d25..2ce299e7 100644 --- a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.h +++ b/mindspore-lite/tools/converter/micro/providers/nnie/nnie_interfaces.h @@ -18,7 +18,7 @@ #include "src/nnie_common.h" -namespace mindspore { +namespace mindspore::lite { namespace nnie { typedef struct { int load_model_; @@ -45,5 +45,5 @@ int NnieRun(NnieHandle *h, NnieTensors *outputs); void NnieClose(NnieHandle *h); HI_U32 CalcInputSize(const NnieTensors *inputs, const size_t *j); } // namespace nnie -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_MICRO_PROVIDERS_NNIE_NNIE_INTERFACES_H_ diff --git a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_micro.cc b/mindspore-lite/tools/converter/micro/providers/nnie/nnie_micro.cc index bd1ff24c..fbb92a61 100644 --- 
a/mindspore-lite/tools/converter/micro/providers/nnie/nnie_micro.cc +++ b/mindspore-lite/tools/converter/micro/providers/nnie/nnie_micro.cc @@ -28,7 +28,7 @@ #include "include/ir/dtype/type_id.h" #include "include/c_api/status_c.h" -namespace mindspore { +namespace mindspore::lite { namespace { using nnie::NnieDataType; using nnie::NnieTensors; @@ -206,7 +206,7 @@ static int ProposalKernel(TensorC *inputs, int input_num, TensorC *outputs, int return 0; } } // namespace proposal -} // namespace mindspore +} // namespace mindspore::lite int CustomKernel(TensorC *inputs, int input_num, TensorC *outputs, int output_num, CustomParameter *param) { if (!strcmp(param->type, "NNIE")) { diff --git a/mindspore-lite/tools/converter/parser/lstm_adjust_pass.cc b/mindspore-lite/tools/converter/parser/lstm_adjust_pass.cc index bf7d862e..67e75082 100644 --- a/mindspore-lite/tools/converter/parser/lstm_adjust_pass.cc +++ b/mindspore-lite/tools/converter/parser/lstm_adjust_pass.cc @@ -31,7 +31,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kMindLstmInputs = 5; @@ -305,4 +305,4 @@ bool LstmAdjustPass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/parser/lstm_adjust_pass.h b/mindspore-lite/tools/converter/parser/lstm_adjust_pass.h index ceabb430..4c0015dd 100644 --- a/mindspore-lite/tools/converter/parser/lstm_adjust_pass.h +++ b/mindspore-lite/tools/converter/parser/lstm_adjust_pass.h @@ -25,7 +25,7 @@ #include "include/common/utils/utils.h" #include "tools/optimizer/common/format_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class LstmAdjustPass : public Pass { public: @@ -34,5 +34,5 @@ class LstmAdjustPass : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_LSTM_ADJUST_PASS_H_ diff --git a/mindspore-lite/tools/converter/parser/pytorch/pytorch_lstm_adjust.cc b/mindspore-lite/tools/converter/parser/pytorch/pytorch_lstm_adjust.cc index 287b426a..ee78ae58 100644 --- a/mindspore-lite/tools/converter/parser/pytorch/pytorch_lstm_adjust.cc +++ b/mindspore-lite/tools/converter/parser/pytorch/pytorch_lstm_adjust.cc @@ -29,7 +29,7 @@ #include "ir/tensor_new.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kTorchLstmInputSize = 4; @@ -277,4 +277,4 @@ bool PytorchLstmAdjustPass::GetAndSetHiddenSize(const ParameterPtr &weight_input return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/parser/pytorch/pytorch_lstm_adjust.h b/mindspore-lite/tools/converter/parser/pytorch/pytorch_lstm_adjust.h index 9d54792d..6372e798 100644 --- a/mindspore-lite/tools/converter/parser/pytorch/pytorch_lstm_adjust.h +++ b/mindspore-lite/tools/converter/parser/pytorch/pytorch_lstm_adjust.h @@ -25,7 +25,7 @@ #include "include/common/utils/utils.h" #include "tools/optimizer/common/format_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class PytorchLstmAdjustPass { public: @@ -44,5 +44,5 @@ class PytorchLstmAdjustPass { int64_t *hidden_size); }; } // namespace opt -} // 
namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/tools/converter/parser/tf/functionalize_cond.cc b/mindspore-lite/tools/converter/parser/tf/functionalize_cond.cc index 35832e7b..67e38892 100644 --- a/mindspore-lite/tools/converter/parser/tf/functionalize_cond.cc +++ b/mindspore-lite/tools/converter/parser/tf/functionalize_cond.cc @@ -31,7 +31,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { STATUS FunctionalizeCond::GetSwitchBranchType(const CNodePtr &switch_cnode, const std::unordered_set &link, BranchType *branch_type) { MS_ASSERT(switch_cnode != nullptr); @@ -419,4 +419,4 @@ STATUS FunctionalizeCond::Process() { } return RET_OK; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/parser/tf/functionalize_cond.h b/mindspore-lite/tools/converter/parser/tf/functionalize_cond.h index d0521490..885f5859 100644 --- a/mindspore-lite/tools/converter/parser/tf/functionalize_cond.h +++ b/mindspore-lite/tools/converter/parser/tf/functionalize_cond.h @@ -26,7 +26,7 @@ #include "tools/optimizer/common/gllo_utils.h" #include "tools/converter/parser/tf/functionalize_control_op_pass.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { typedef enum { kThenBranch = 0, kElseBranch = 1 } BranchType; @@ -64,6 +64,6 @@ class FunctionalizeCond { std::vector input_nodes_{}; std::vector pred_nodes_{}; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_FUNCTIONALIZE_COND_H_ diff --git a/mindspore-lite/tools/converter/parser/tf/functionalize_control_op_pass.cc b/mindspore-lite/tools/converter/parser/tf/functionalize_control_op_pass.cc index cbadd724..ef8a3dff 100644 --- a/mindspore-lite/tools/converter/parser/tf/functionalize_control_op_pass.cc +++ b/mindspore-lite/tools/converter/parser/tf/functionalize_control_op_pass.cc @@ -23,7 +23,7 @@ #include "nnacl_c/op_base.h" #include "src/common/log_util.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { FuncGraphPtr FunctionalizeControlOpPass::NewFuncGraph(const std::string &subgraph_name, const FmkType &fmk_type) { auto fg = std::make_shared(); @@ -199,4 +199,4 @@ CNodePtr FunctionalizeControlOpPass::BelongToWhichNode(const CNodePtr &node, con return aim_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/parser/tf/functionalize_control_op_pass.h b/mindspore-lite/tools/converter/parser/tf/functionalize_control_op_pass.h index fc601079..9b0ea30a 100644 --- a/mindspore-lite/tools/converter/parser/tf/functionalize_control_op_pass.h +++ b/mindspore-lite/tools/converter/parser/tf/functionalize_control_op_pass.h @@ -29,7 +29,7 @@ #include "mindspore/core/include/ir/graph_utils.h" using mindspore::converter::FmkType; -namespace mindspore::opt { +namespace mindspore::lite::opt { using AimFunc = std::function; class FunctionalizeControlOpPass : public Pass { public: @@ -73,5 +73,5 @@ class FunctionalizeControlOpPass : public Pass { std::vector>> node_clusters_{}; std::vector loop_cond_nodes_{}; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_FUNCTIONALIZE_CONTROL_OP_PASS_H_ diff --git a/mindspore-lite/tools/converter/parser/tf/functionalize_while.cc 
b/mindspore-lite/tools/converter/parser/tf/functionalize_while.cc index 1d22f6f8..9002a9c3 100644 --- a/mindspore-lite/tools/converter/parser/tf/functionalize_while.cc +++ b/mindspore-lite/tools/converter/parser/tf/functionalize_while.cc @@ -41,7 +41,7 @@ mindspore::ValueNodePtr GetWhileAnfPrim() { } } // namespace -namespace mindspore::opt { +namespace mindspore::lite::opt { using mindspore::lite::RET_NULL_PTR; @@ -664,4 +664,4 @@ STATUS FunctionalizeWhile::Process() { } return ret; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/parser/tf/functionalize_while.h b/mindspore-lite/tools/converter/parser/tf/functionalize_while.h index b7ffbd21..f460e551 100644 --- a/mindspore-lite/tools/converter/parser/tf/functionalize_while.h +++ b/mindspore-lite/tools/converter/parser/tf/functionalize_while.h @@ -24,7 +24,7 @@ #include "tools/optimizer/common/gllo_utils.h" #include "tools/converter/parser/tf/functionalize_control_op_pass.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { constexpr const int POS_INVALID = -1; @@ -91,5 +91,5 @@ class FunctionalizeWhile { std::map cond_subgraph_input_map_{}; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_SRC_PASS_FUNCTIONALIZE_WHILE_PASS_H_ diff --git a/mindspore-lite/tools/converter/parser/tf_bidirection_gru_cf_fusion.cc b/mindspore-lite/tools/converter/parser/tf_bidirection_gru_cf_fusion.cc index e4cf2609..c9d0a4fa 100644 --- a/mindspore-lite/tools/converter/parser/tf_bidirection_gru_cf_fusion.cc +++ b/mindspore-lite/tools/converter/parser/tf_bidirection_gru_cf_fusion.cc @@ -43,7 +43,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kNumFwVars = 4; @@ -237,4 +237,4 @@ const AnfNodePtr TfBidirectionGruCfFusion::Process(const FuncGraphPtr &func_grap return output_node; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/parser/tf_bidirection_gru_cf_fusion.h b/mindspore-lite/tools/converter/parser/tf_bidirection_gru_cf_fusion.h index a7fc2505..dede7b75 100644 --- a/mindspore-lite/tools/converter/parser/tf_bidirection_gru_cf_fusion.h +++ b/mindspore-lite/tools/converter/parser/tf_bidirection_gru_cf_fusion.h @@ -25,7 +25,7 @@ #include "include/common/utils/utils.h" #include "include/errorcode.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { // fuse tf 1.x bidirection_gru into MSLITE GRU class TfBidirectionGruCfFusion : public TfBidirectionGruFusion { @@ -46,6 +46,6 @@ class TfBidirectionGruCfFusion : public TfBidirectionGruFusion { const VarPtr &init_state) const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_BIDIRECTION_GRU_CF_FUSION_H_ diff --git a/mindspore-lite/tools/converter/parser/unused_node_remove_pass.cc b/mindspore-lite/tools/converter/parser/unused_node_remove_pass.cc index 17b97596..caf5528b 100644 --- a/mindspore-lite/tools/converter/parser/unused_node_remove_pass.cc +++ b/mindspore-lite/tools/converter/parser/unused_node_remove_pass.cc @@ -20,7 +20,7 @@ #include #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { STATUS UnusedNodeRemovePass::ProcessGraph(const FuncGraphPtr &func_graph, std::set 
*has_visited) { MS_ASSERT(func_graph != nullptr && has_visited != nullptr); @@ -75,4 +75,4 @@ bool UnusedNodeRemovePass::Run(const FuncGraphPtr &func_graph) { auto status = ProcessGraph(func_graph, &has_visited); return status == RET_OK; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/converter/parser/unused_node_remove_pass.h b/mindspore-lite/tools/converter/parser/unused_node_remove_pass.h index 6746bc6b..7beaae6a 100644 --- a/mindspore-lite/tools/converter/parser/unused_node_remove_pass.h +++ b/mindspore-lite/tools/converter/parser/unused_node_remove_pass.h @@ -22,7 +22,7 @@ #include "include/errorcode.h" using mindspore::lite::STATUS; -namespace mindspore::opt { +namespace mindspore::lite::opt { class UnusedNodeRemovePass : public Pass { public: UnusedNodeRemovePass() : Pass("remove_unused_node_pass") {} @@ -32,6 +32,6 @@ class UnusedNodeRemovePass : public Pass { private: STATUS ProcessGraph(const FuncGraphPtr &func_graph, std::set *has_visited); }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_UNUSED_NODE_REMOVE_PASS_H_ diff --git a/mindspore-lite/tools/converter/quantizer/gptq_quantizer.h b/mindspore-lite/tools/converter/quantizer/gptq_quantizer.h index cf183900..a98b12d6 100644 --- a/mindspore-lite/tools/converter/quantizer/gptq_quantizer.h +++ b/mindspore-lite/tools/converter/quantizer/gptq_quantizer.h @@ -31,7 +31,7 @@ #include "ir/anf.h" #include "nnacl_c/matmul_parameter.h" -namespace mindspore { +namespace mindspore::lite { namespace lite::quant { struct WeightInfo { @@ -105,5 +105,5 @@ class GptqQuantizer { Model *model_ = nullptr; }; } // namespace lite::quant -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_QUANTIZER_GPTQ_QUANTIZER_H_ diff --git a/mindspore-lite/tools/converter/registry/model_parser_registry.cc b/mindspore-lite/tools/converter/registry/model_parser_registry.cc index 1421775a..9d099082 100644 --- a/mindspore-lite/tools/converter/registry/model_parser_registry.cc +++ b/mindspore-lite/tools/converter/registry/model_parser_registry.cc @@ -19,7 +19,7 @@ #include "src/common/log_adapter.h" #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { namespace registry { namespace { std::map model_parser_room; @@ -51,4 +51,4 @@ converter::ModelParser *ModelParserRegistry::GetModelParser(FmkType fmk) { return nullptr; } } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/registry/node_parser_registry.cc b/mindspore-lite/tools/converter/registry/node_parser_registry.cc index 4910f254..39c1c43b 100644 --- a/mindspore-lite/tools/converter/registry/node_parser_registry.cc +++ b/mindspore-lite/tools/converter/registry/node_parser_registry.cc @@ -20,7 +20,7 @@ #include #include "src/common/log_adapter.h" -namespace mindspore { +namespace mindspore::lite { namespace registry { namespace { constexpr size_t kOpNumLimit = 10000; @@ -56,4 +56,4 @@ converter::NodeParserPtr NodeParserRegistry::GetNodeParser(converter::FmkType fm return iter_level2->second; } } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/converter/registry/pass_registry.cc b/mindspore-lite/tools/converter/registry/pass_registry.cc index 7bbaccb5..1ac08a41 100644 --- a/mindspore-lite/tools/converter/registry/pass_registry.cc +++ 
b/mindspore-lite/tools/converter/registry/pass_registry.cc @@ -22,7 +22,7 @@ #include "src/common/log_adapter.h" #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { namespace registry { namespace { constexpr size_t kPassNumLimit = 10000; @@ -67,4 +67,4 @@ PassBasePtr PassRegistry::GetPassFromStoreRoom(const std::vector &pass_nam return outer_pass_storage.find(pass_name) == outer_pass_storage.end() ? nullptr : outer_pass_storage[pass_name]; } } // namespace registry -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/common/format_utils.cc b/mindspore-lite/tools/optimizer/common/format_utils.cc index 1dd45fe2..c96d309f 100644 --- a/mindspore-lite/tools/optimizer/common/format_utils.cc +++ b/mindspore-lite/tools/optimizer/common/format_utils.cc @@ -83,7 +83,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "ir/tensor_new.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { // treat the weight of deformableConv2d as an input instead of a const because of the ops infershape only support nchw. static const std::unordered_map> NHWCOpMap = { @@ -417,4 +417,4 @@ int ConvertAbstractFormatShape(const AbstractBasePtr &abstract, FormatTransNodeT return RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/common/format_utils.h b/mindspore-lite/tools/optimizer/common/format_utils.h index f8a07794..ab2dd55e 100644 --- a/mindspore-lite/tools/optimizer/common/format_utils.h +++ b/mindspore-lite/tools/optimizer/common/format_utils.h @@ -23,7 +23,7 @@ #include "tools/optimizer/common/gllo_utils.h" #include "utils/check_convert_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { constexpr auto kOutputsFormat = "outputs_format"; enum FormatTransNodeType { kNCHW2NHWC, kNHWC2NCHW, kNONE }; @@ -49,6 +49,6 @@ int SetAbstractTensorInfo(const AbstractBasePtr &abstract); STATUS GetFormatSensitiveOpInsertIndex(const CNodePtr &cnode, std::vector *insert_index); int ConvertAbstractFormatShape(const AbstractBasePtr &abstract, FormatTransNodeType perm); } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_COMMON_FORMAT_UTILS_H_ diff --git a/mindspore-lite/tools/optimizer/common/gllo_utils.cc b/mindspore-lite/tools/optimizer/common/gllo_utils.cc index 5b433bba..c503d6a5 100644 --- a/mindspore-lite/tools/optimizer/common/gllo_utils.cc +++ b/mindspore-lite/tools/optimizer/common/gllo_utils.cc @@ -60,7 +60,7 @@ #include "ir/tensor_new.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr auto kAnfPrimitiveIndex = 0; @@ -2058,4 +2058,4 @@ STATUS GetPrimFromCnode(const CNodePtr &cnode, PrimitivePtr *prim_ptr) { } }; // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/common/gllo_utils.h b/mindspore-lite/tools/optimizer/common/gllo_utils.h index c75381cd..e1878587 100644 --- a/mindspore-lite/tools/optimizer/common/gllo_utils.h +++ b/mindspore-lite/tools/optimizer/common/gllo_utils.h @@ -37,7 +37,7 @@ using PrimitiveCPtr = std::shared_ptr; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::STATUS; -namespace mindspore { +namespace mindspore::lite { namespace opt { // used for common op, which corresponding value is a boolean. 
constexpr auto kInferDone = "infer_done"; @@ -232,5 +232,5 @@ const float GetFloatParameterValue(const EquivPtr &equiv, const VarPtr &input); const int GetIntParameterValue(const EquivPtr &equiv, const VarPtr &input); STATUS GetPrimFromCnode(const CNodePtr &cnode, PrimitivePtr *prim_ptr); } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_COMMON_GLLO_UTILS_H_ diff --git a/mindspore-lite/tools/optimizer/common/helper.cc b/mindspore-lite/tools/optimizer/common/helper.cc index 5b511e0f..d0890bd8 100644 --- a/mindspore-lite/tools/optimizer/common/helper.cc +++ b/mindspore-lite/tools/optimizer/common/helper.cc @@ -23,7 +23,7 @@ #include "nnacl_c/op_base.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { ValueNodePtr Helper::CreateValueNodeWithSexp(const BaseRef &sexp) { if (utils::isa(sexp)) { @@ -300,4 +300,4 @@ AnfNodePtr GetAnfNodeByVar(const EquivPtr &equiv, const VarPtr &var_node) { return res; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/common/helper.h b/mindspore-lite/tools/optimizer/common/helper.h index 82fdefca..fc92a3b3 100644 --- a/mindspore-lite/tools/optimizer/common/helper.h +++ b/mindspore-lite/tools/optimizer/common/helper.h @@ -23,7 +23,7 @@ #include "include/backend/optimizer/helper.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class Helper { public: @@ -42,6 +42,6 @@ class Helper { bool multigraph); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_COMMON_HELPER_H_ diff --git a/mindspore-lite/tools/optimizer/common/multiple_pattern_process_pass.cc b/mindspore-lite/tools/optimizer/common/multiple_pattern_process_pass.cc index e809de0e..7eef6df9 100644 --- a/mindspore-lite/tools/optimizer/common/multiple_pattern_process_pass.cc +++ b/mindspore-lite/tools/optimizer/common/multiple_pattern_process_pass.cc @@ -18,7 +18,7 @@ #include "tools/optimizer/common/helper.h" #include "nnacl_c/op_base.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { AnfNodePtr MultiplePatternProcessPass::Run(const FuncGraphPtr &func_graph, const AnfNodePtr &node) { if (patterns_.empty()) { VarPtr fg = std::make_shared("RootG"); @@ -48,4 +48,4 @@ AnfNodePtr MultiplePatternProcessPass::Run(const FuncGraphPtr &func_graph, const } return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/common/multiple_pattern_process_pass.h b/mindspore-lite/tools/optimizer/common/multiple_pattern_process_pass.h index 332b4ed8..a38dbe74 100644 --- a/mindspore-lite/tools/optimizer/common/multiple_pattern_process_pass.h +++ b/mindspore-lite/tools/optimizer/common/multiple_pattern_process_pass.h @@ -26,7 +26,7 @@ #include "include/backend/optimizer/helper.h" #include "tools/optimizer/common/node_pass_extends.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class MultiplePatternProcessPass : public LiteNodePass { public: @@ -44,6 +44,6 @@ class MultiplePatternProcessPass : public LiteNodePass { PatternEngine pattern_engine_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_COMMON_MULTIPLE_PATTERN_PROCESS_PASS_H_ diff --git 
a/mindspore-lite/tools/optimizer/common/node_pass_extends.cc b/mindspore-lite/tools/optimizer/common/node_pass_extends.cc index 3cea9f6c..1a1db53f 100644 --- a/mindspore-lite/tools/optimizer/common/node_pass_extends.cc +++ b/mindspore-lite/tools/optimizer/common/node_pass_extends.cc @@ -24,7 +24,7 @@ #include "ir/manager.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool NodePass::Run(const FuncGraphPtr &func_graph) { MS_LOG(ERROR) << "stub func"; @@ -91,4 +91,4 @@ bool LiteNodePass::Run(const FuncGraphPtr &func_graph) { return changes; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/common/node_pass_extends.h b/mindspore-lite/tools/optimizer/common/node_pass_extends.h index 492f071d..16597c29 100644 --- a/mindspore-lite/tools/optimizer/common/node_pass_extends.h +++ b/mindspore-lite/tools/optimizer/common/node_pass_extends.h @@ -21,7 +21,7 @@ #include #include "include/backend/optimizer/node_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class LiteNodePass : public NodePass { public: @@ -31,5 +31,5 @@ class LiteNodePass : public NodePass { virtual AnfNodePtr Run(const FuncGraphPtr &func_graph, const AnfNodePtr &node) = 0; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_COMMON_NODE_PASS_EXTENDS_H_ diff --git a/mindspore-lite/tools/optimizer/common/pass_manager_extends.cc b/mindspore-lite/tools/optimizer/common/pass_manager_extends.cc index 30b41766..0b52987a 100644 --- a/mindspore-lite/tools/optimizer/common/pass_manager_extends.cc +++ b/mindspore-lite/tools/optimizer/common/pass_manager_extends.cc @@ -23,7 +23,7 @@ #include "ir/anf.h" #include "backend/common/pass_manager/cache_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { constexpr size_t kMaxRepassTimes = 12; constexpr uint64_t kUSecondInSecond = 1000000; @@ -135,4 +135,4 @@ bool LitePassManager::Run(const FuncGraphPtr &func_graph) const { return changed; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/common/pass_manager_extends.h b/mindspore-lite/tools/optimizer/common/pass_manager_extends.h index d834972d..43de648e 100644 --- a/mindspore-lite/tools/optimizer/common/pass_manager_extends.h +++ b/mindspore-lite/tools/optimizer/common/pass_manager_extends.h @@ -21,7 +21,7 @@ #include #include "include/backend/optimizer/pass_manager.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class LitePassManager : public PassManager { public: @@ -37,5 +37,5 @@ class LitePassManager : public PassManager { std::string GetPassFullname(size_t pass_id, const PassPtr &pass) const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_COMMON_PASS_MANAGER_EXTENDS_H_ diff --git a/mindspore-lite/tools/optimizer/common/pattern_process_pass_extends.cc b/mindspore-lite/tools/optimizer/common/pattern_process_pass_extends.cc index a2fa3bee..4160e8d6 100644 --- a/mindspore-lite/tools/optimizer/common/pattern_process_pass_extends.cc +++ b/mindspore-lite/tools/optimizer/common/pattern_process_pass_extends.cc @@ -25,7 +25,7 @@ #include "ir/manager.h" #include "tools/optimizer/common/helper.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { void LitePatternProcessPass::Build() { VarPtr fg = 
std::make_shared("RootG"); @@ -50,4 +50,4 @@ AnfNodePtr LitePatternProcessPass::Run(const FuncGraphPtr &func_graph, const Anf return nullptr; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/common/pattern_process_pass_extends.h b/mindspore-lite/tools/optimizer/common/pattern_process_pass_extends.h index 0b383e03..8489dd3d 100644 --- a/mindspore-lite/tools/optimizer/common/pattern_process_pass_extends.h +++ b/mindspore-lite/tools/optimizer/common/pattern_process_pass_extends.h @@ -22,7 +22,7 @@ #include "include/backend/optimizer/pattern_engine.h" #include "tools/optimizer/common/node_pass_extends.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class LitePatternProcessPass : public LiteNodePass { public: @@ -47,5 +47,5 @@ class LitePatternProcessPass : public LiteNodePass { EquivPtr equiv_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_COMMON_PATTERN_PROCESS_PASS_EXTENDS_H_ diff --git a/mindspore-lite/tools/optimizer/const_fold/constant_folding_fusion.h b/mindspore-lite/tools/optimizer/const_fold/constant_folding_fusion.h index 6f0fba20..0d060d54 100644 --- a/mindspore-lite/tools/optimizer/const_fold/constant_folding_fusion.h +++ b/mindspore-lite/tools/optimizer/const_fold/constant_folding_fusion.h @@ -23,7 +23,7 @@ #include "tools/optimizer/const_fold/fold_with_infershape.h" #include "tools/optimizer/graph/update_conv2d_param_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ConstFoldPass : public Pass { public: @@ -64,5 +64,5 @@ class ConstFoldPass : public Pass { bool train_flag_{false}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_CONST_FOLD_CONSTANT_FOLDING_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/const_fold/fold_along_infershape.cc b/mindspore-lite/tools/optimizer/const_fold/fold_along_infershape.cc index 39cd9f70..84217e6a 100644 --- a/mindspore-lite/tools/optimizer/const_fold/fold_along_infershape.cc +++ b/mindspore-lite/tools/optimizer/const_fold/fold_along_infershape.cc @@ -22,7 +22,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { STATUS ConstFoldAlongInferShape::PostProcess(const FuncGraphPtr &func_graph, const CNodePtr &cnode) { MS_ASSERT(func_graph != nullptr && cnode != nullptr); @@ -73,4 +73,4 @@ bool ConstFoldAlongInferShape::CheckCanFold(const FuncGraphPtr &func_graph, cons return false; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/const_fold/fold_along_infershape.h b/mindspore-lite/tools/optimizer/const_fold/fold_along_infershape.h index 0b9e0e67..eb7c3eac 100644 --- a/mindspore-lite/tools/optimizer/const_fold/fold_along_infershape.h +++ b/mindspore-lite/tools/optimizer/const_fold/fold_along_infershape.h @@ -21,7 +21,7 @@ #include "tools/optimizer/graph/infershape_pass.h" #include "tools/optimizer/const_fold/fold_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ConstFoldAlongInferShape : public InferShapePass { public: @@ -35,5 +35,5 @@ class ConstFoldAlongInferShape : public InferShapePass { std::shared_ptr const_fold_processor_{nullptr}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite 
#endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_CONST_FOLD_FOLD_ALONG_INFERSHAPE_H_ diff --git a/mindspore-lite/tools/optimizer/const_fold/fold_utils.cc b/mindspore-lite/tools/optimizer/const_fold/fold_utils.cc index 6a4648e7..dbe3d568 100644 --- a/mindspore-lite/tools/optimizer/const_fold/fold_utils.cc +++ b/mindspore-lite/tools/optimizer/const_fold/fold_utils.cc @@ -44,7 +44,7 @@ using mindspore::lite::KernelRegistry; using mindspore::lite::Tensor; #include "ir/tensor_new.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { bool IsInferInRunning(const CNodePtr &cnode) { return CheckPrimitiveType(cnode, prim::kPrimWhere); } @@ -327,4 +327,4 @@ int ConstFoldProcessor::DoConstantFold(const FuncGraphPtr &func_graph, const CNo return status; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/const_fold/fold_utils.h b/mindspore-lite/tools/optimizer/const_fold/fold_utils.h index e60c3c9b..4b6d22a0 100644 --- a/mindspore-lite/tools/optimizer/const_fold/fold_utils.h +++ b/mindspore-lite/tools/optimizer/const_fold/fold_utils.h @@ -24,7 +24,7 @@ #include "schema/inner/model_generated.h" #include "src/litert/inner_context.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ConstFoldProcessor { public: @@ -41,5 +41,5 @@ class ConstFoldProcessor { std::shared_ptr ms_context_{nullptr}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_CONST_FOLD_FOLD_UTILS_H_ diff --git a/mindspore-lite/tools/optimizer/const_fold/fold_with_infershape.cc b/mindspore-lite/tools/optimizer/const_fold/fold_with_infershape.cc index 72a292f7..9dd4d5ef 100644 --- a/mindspore-lite/tools/optimizer/const_fold/fold_with_infershape.cc +++ b/mindspore-lite/tools/optimizer/const_fold/fold_with_infershape.cc @@ -25,7 +25,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr auto kIsLinkWithControlFlow = "link_with_control_flow"; } // namespace @@ -161,4 +161,4 @@ bool ConstFoldWithInferShape::CheckCanSpecialFold(const CNodePtr &cnode) const { } return CheckCanCommonFold(cnode); } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/const_fold/fold_with_infershape.h b/mindspore-lite/tools/optimizer/const_fold/fold_with_infershape.h index 85c9df26..6815331f 100644 --- a/mindspore-lite/tools/optimizer/const_fold/fold_with_infershape.h +++ b/mindspore-lite/tools/optimizer/const_fold/fold_with_infershape.h @@ -25,7 +25,7 @@ #include "tools/optimizer/graph/node_infershape.h" #include "tools/optimizer/const_fold/fold_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ConstFoldWithInferShape : public Pass { public: @@ -46,5 +46,5 @@ class ConstFoldWithInferShape : public Pass { FuncGraphManagerPtr manager_{nullptr}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_CONST_FOLD_FOLD_WITH_INFERSHAPE_H_ diff --git a/mindspore-lite/tools/optimizer/const_fold/rsqrt_fp32.cc b/mindspore-lite/tools/optimizer/const_fold/rsqrt_fp32.cc index 6d606e13..fce5080c 100644 --- a/mindspore-lite/tools/optimizer/const_fold/rsqrt_fp32.cc +++ b/mindspore-lite/tools/optimizer/const_fold/rsqrt_fp32.cc @@ -17,7 +17,7 @@ using mindspore::lite::RET_ERROR; using 
mindspore::lite::RET_OK; -namespace mindspore::kernel { +namespace mindspore::lite::kernel { int HighAccuracyRsqrtCPUKernel::Prepare() { CHECK_NOT_EQUAL_RETURN(in_tensors_.size(), 1); CHECK_NOT_EQUAL_RETURN(out_tensors_.size(), 1); @@ -46,4 +46,4 @@ int HighAccuracyRsqrtCPUKernel::Run() { } return RET_OK; } -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel diff --git a/mindspore-lite/tools/optimizer/const_fold/rsqrt_fp32.h b/mindspore-lite/tools/optimizer/const_fold/rsqrt_fp32.h index f9ad0c22..98e0a52c 100644 --- a/mindspore-lite/tools/optimizer/const_fold/rsqrt_fp32.h +++ b/mindspore-lite/tools/optimizer/const_fold/rsqrt_fp32.h @@ -22,7 +22,7 @@ #include #include "src/litert/lite_kernel.h" -namespace mindspore::kernel { +namespace mindspore::lite::kernel { class HighAccuracyRsqrtCPUKernel : public LiteKernel { public: explicit HighAccuracyRsqrtCPUKernel(OpParameter *parameter, const std::vector &inputs, @@ -34,6 +34,6 @@ class HighAccuracyRsqrtCPUKernel : public LiteKernel { int ReSize() override; int Run() override; }; -} // namespace mindspore::kernel +} // namespace mindspore::lite::kernel #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_CONST_FOLD_RSQRT_FP32_H_ diff --git a/mindspore-lite/tools/optimizer/fisson/eliminate_concat_split.cc b/mindspore-lite/tools/optimizer/fisson/eliminate_concat_split.cc index a935cecd..e29b26cf 100644 --- a/mindspore-lite/tools/optimizer/fisson/eliminate_concat_split.cc +++ b/mindspore-lite/tools/optimizer/fisson/eliminate_concat_split.cc @@ -35,7 +35,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { CNodePtr GetRealPrevCNode(const AnfNodePtr &node) { @@ -182,4 +182,4 @@ const AnfNodePtr EliminateConcatSplit::Process(const FuncGraphPtr &func_graph, c return node; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fisson/eliminate_concat_split.h b/mindspore-lite/tools/optimizer/fisson/eliminate_concat_split.h index ef1a1f44..0b0f5b13 100644 --- a/mindspore-lite/tools/optimizer/fisson/eliminate_concat_split.h +++ b/mindspore-lite/tools/optimizer/fisson/eliminate_concat_split.h @@ -20,7 +20,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "tools/optimizer/fisson/fisson_util.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class EliminateConcatSplit : public LitePatternProcessPass { public: @@ -33,5 +33,5 @@ class EliminateConcatSplit : public LitePatternProcessPass { const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FISSON_ELIMINATE_CONCAT_SPLIT_H_ diff --git a/mindspore-lite/tools/optimizer/fisson/fisson_util.cc b/mindspore-lite/tools/optimizer/fisson/fisson_util.cc index c751ffd7..32872f42 100644 --- a/mindspore-lite/tools/optimizer/fisson/fisson_util.cc +++ b/mindspore-lite/tools/optimizer/fisson/fisson_util.cc @@ -34,7 +34,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" using mindspore::converter::FmkType; -namespace mindspore { +namespace mindspore::lite { namespace opt { std::vector GetSplitPadList(const api::SharedPtr &ori_conv_prim, int64_t input_h, int64_t input_w) { @@ -472,4 +472,4 @@ bool UpdateRatioWithPadStride(int64_t *ratio, size_t 
ratio_len, size_t split_siz return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fisson/fisson_util.h b/mindspore-lite/tools/optimizer/fisson/fisson_util.h index af1ee78c..e4311572 100644 --- a/mindspore-lite/tools/optimizer/fisson/fisson_util.h +++ b/mindspore-lite/tools/optimizer/fisson/fisson_util.h @@ -26,7 +26,7 @@ #include "include/lite_types.h" #include "infer/cxx_api/conv2d_fusion.h" -namespace mindspore { +namespace mindspore::lite { using mindspore::schema::PrimitiveType; namespace opt { @@ -65,5 +65,5 @@ bool CreateOutputsOfSplitWithOverlap(const FuncGraphPtr &func_graph, const AnfNo const std::string &node_name); bool UpdateRatioWithPadStride(int64_t *ratio, size_t ratio_len, size_t split_size, int split_dim_size); } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FISSON_FISSON_UTIL_H_ diff --git a/mindspore-lite/tools/optimizer/fisson/iter_node_outputs.cc b/mindspore-lite/tools/optimizer/fisson/iter_node_outputs.cc index 4dc8858f..78785194 100644 --- a/mindspore-lite/tools/optimizer/fisson/iter_node_outputs.cc +++ b/mindspore-lite/tools/optimizer/fisson/iter_node_outputs.cc @@ -19,7 +19,7 @@ #include "tools/optimizer/parallel/spliter.h" #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { AnfNodePtr IterNodeOutputs::Run(const FuncGraphPtr &func_graph, const AnfNodePtr &node) { MS_CHECK_TRUE_MSG(func_graph != nullptr, nullptr, "input func_graph is nullptr"); @@ -42,4 +42,4 @@ AnfNodePtr IterNodeOutputs::Run(const FuncGraphPtr &func_graph, const AnfNodePtr return nullptr; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fisson/iter_node_outputs.h b/mindspore-lite/tools/optimizer/fisson/iter_node_outputs.h index 48f16cef..8d1a0bac 100644 --- a/mindspore-lite/tools/optimizer/fisson/iter_node_outputs.h +++ b/mindspore-lite/tools/optimizer/fisson/iter_node_outputs.h @@ -20,7 +20,7 @@ #ifndef MINDSPORE_LITE_TOOLS_OPTIMIZER_FISSON_ITER_NODE_OUTPUTS_H_ #define MINDSPORE_LITE_TOOLS_OPTIMIZER_FISSON_ITER_NODE_OUTPUTS_H_ -namespace mindspore { +namespace mindspore::lite { namespace opt { class IterNodeOutputs : public opt::LiteNodePass { public: @@ -29,6 +29,6 @@ class IterNodeOutputs : public opt::LiteNodePass { AnfNodePtr Run(const FuncGraphPtr &func_graph, const AnfNodePtr &node) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FISSON_ITER_NODE_OUTPUTS_H_ diff --git a/mindspore-lite/tools/optimizer/fisson/multi_conv_split_pass.cc b/mindspore-lite/tools/optimizer/fisson/multi_conv_split_pass.cc index dd2b6036..ac01f088 100644 --- a/mindspore-lite/tools/optimizer/fisson/multi_conv_split_pass.cc +++ b/mindspore-lite/tools/optimizer/fisson/multi_conv_split_pass.cc @@ -27,7 +27,7 @@ #include "ops_utils/op_utils.h" using mindspore::schema::PrimitiveType_Conv2dTransposeFusion; -namespace mindspore { +namespace mindspore::lite { namespace opt { std::string MultiConvSplitPass::IsMultiParallelConvNode(const AnfNodePtr &node) const { MS_ASSERT(node != nullptr); @@ -94,4 +94,4 @@ const AnfNodePtr MultiConvSplitPass::Process(const FuncGraphPtr &func_graph, con } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fisson/multi_conv_split_pass.h 
b/mindspore-lite/tools/optimizer/fisson/multi_conv_split_pass.h index 47de1638..48096f4c 100644 --- a/mindspore-lite/tools/optimizer/fisson/multi_conv_split_pass.h +++ b/mindspore-lite/tools/optimizer/fisson/multi_conv_split_pass.h @@ -28,7 +28,7 @@ #include "tools/optimizer/parallel/multi_node_split.h" using mindspore::schema::PrimitiveType; -namespace mindspore { +namespace mindspore::lite { namespace opt { class MultiConvSplitPass : public LitePatternProcessPass { @@ -52,5 +52,5 @@ class MultiConvSplitPass : public LitePatternProcessPass { int32_t num_{0}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FISSON_MULTI_CONV_SPLIT_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/fisson/node_out_shapes.cc b/mindspore-lite/tools/optimizer/fisson/node_out_shapes.cc index baf877a7..de5c46f4 100644 --- a/mindspore-lite/tools/optimizer/fisson/node_out_shapes.cc +++ b/mindspore-lite/tools/optimizer/fisson/node_out_shapes.cc @@ -21,7 +21,7 @@ #include "tools/optimizer/parallel/spliter.h" #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { AnfNodePtr NodeOutShapes::Run(const FuncGraphPtr &func_graph, const AnfNodePtr &node) { MS_CHECK_TRUE_MSG(func_graph != nullptr, nullptr, "input func_graph is nullptr"); @@ -75,4 +75,4 @@ AnfNodePtr NodeOutShapes::Run(const FuncGraphPtr &func_graph, const AnfNodePtr & return node; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fisson/node_out_shapes.h b/mindspore-lite/tools/optimizer/fisson/node_out_shapes.h index 9b88045f..de99207c 100644 --- a/mindspore-lite/tools/optimizer/fisson/node_out_shapes.h +++ b/mindspore-lite/tools/optimizer/fisson/node_out_shapes.h @@ -20,7 +20,7 @@ #ifndef MINDSPORE_LITE_TOOLS_OPTIMIZER_FISSON_NODE_OUT_SHAPES_H_ #define MINDSPORE_LITE_TOOLS_OPTIMIZER_FISSON_NODE_OUT_SHAPES_H_ -namespace mindspore { +namespace mindspore::lite { namespace opt { class NodeOutShapes : public opt::LiteNodePass { public: @@ -30,6 +30,6 @@ class NodeOutShapes : public opt::LiteNodePass { }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FISSON_NODE_OUT_SHAPES_H_ diff --git a/mindspore-lite/tools/optimizer/format/delete_redundant_transpose.cc b/mindspore-lite/tools/optimizer/format/delete_redundant_transpose.cc index 40acc519..7cacca44 100644 --- a/mindspore-lite/tools/optimizer/format/delete_redundant_transpose.cc +++ b/mindspore-lite/tools/optimizer/format/delete_redundant_transpose.cc @@ -30,7 +30,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_w.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { STATUS DeleteRedundantTranspose::DeleteControlFlowTranspose(const CNodePtr &cnode) { auto sub_func_graph = GetValueNode(cnode->input(1)); @@ -339,4 +339,4 @@ STATUS DeleteRedundantTranspose::CopyQuantParam(const CNodePtr &cnode, const CNo return RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/format/delete_redundant_transpose.h b/mindspore-lite/tools/optimizer/format/delete_redundant_transpose.h index b744dbdc..3b2f2c8d 100644 --- a/mindspore-lite/tools/optimizer/format/delete_redundant_transpose.h +++ b/mindspore-lite/tools/optimizer/format/delete_redundant_transpose.h @@ -20,7 +20,7 @@ #include 
"include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class DeleteRedundantTranspose : public Pass { public: @@ -39,6 +39,6 @@ class DeleteRedundantTranspose : public Pass { FuncGraphManagerPtr manager_{nullptr}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FORMAT_DELETE_REDUNDANT_TRANSPOSE_H_ diff --git a/mindspore-lite/tools/optimizer/format/to_format_base.cc b/mindspore-lite/tools/optimizer/format/to_format_base.cc index cd0b6993..7afaaabc 100644 --- a/mindspore-lite/tools/optimizer/format/to_format_base.cc +++ b/mindspore-lite/tools/optimizer/format/to_format_base.cc @@ -36,7 +36,7 @@ #include "mindspore/core/include/ir/graph_utils.h" using mindspore::lite::NHWC_SHAPE; -namespace mindspore { +namespace mindspore::lite { namespace opt { STATUS ToFormatBase::GenNewInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode, const std::vector &perm, bool before, size_t index) { @@ -622,4 +622,4 @@ bool ToFormatBase::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/format/to_format_base.h b/mindspore-lite/tools/optimizer/format/to_format_base.h index 6e957fe2..c92bb0fc 100644 --- a/mindspore-lite/tools/optimizer/format/to_format_base.h +++ b/mindspore-lite/tools/optimizer/format/to_format_base.h @@ -29,7 +29,7 @@ #include "tools/optimizer/graph/infershape_pass.h" using mindspore::converter::FmkType; -namespace mindspore { +namespace mindspore::lite { namespace opt { class ToFormatBase : public Pass { public: @@ -83,6 +83,6 @@ class ToFormatBase : public Pass { FuncGraphManagerPtr manager_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FORMAT_TO_FORMAT_BASE_H_ diff --git a/mindspore-lite/tools/optimizer/format/to_nchw_format.cc b/mindspore-lite/tools/optimizer/format/to_nchw_format.cc index 24265844..e336a47f 100644 --- a/mindspore-lite/tools/optimizer/format/to_nchw_format.cc +++ b/mindspore-lite/tools/optimizer/format/to_nchw_format.cc @@ -18,7 +18,7 @@ #include "tools/optimizer/format/to_nchw_format.h" #include "ops_utils/op_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { STATUS ToNCHWFormat::GetTransNodeFormatType(const CNodePtr &cnode, opt::TransTypePair *trans_info) { MS_ERROR_IF_NULL_W_RET_VAL(cnode, lite::RET_ERROR); @@ -71,4 +71,4 @@ STATUS ToNCHWFormat::DecideConvWeightSrcAndDstFormat(const CNodePtr &cnode, sche return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/format/to_nchw_format.h b/mindspore-lite/tools/optimizer/format/to_nchw_format.h index b9e52707..7b9eed91 100644 --- a/mindspore-lite/tools/optimizer/format/to_nchw_format.h +++ b/mindspore-lite/tools/optimizer/format/to_nchw_format.h @@ -19,7 +19,7 @@ #include "tools/optimizer/format/to_format_base.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ToNCHWFormat : public ToFormatBase { public: @@ -37,6 +37,6 @@ class ToNCHWFormat : public ToFormatBase { schema::Format *dst_format) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FORMAT_TO_NCHW_FORMAT_H_ diff --git a/mindspore-lite/tools/optimizer/format/to_nhwc_format.cc 
b/mindspore-lite/tools/optimizer/format/to_nhwc_format.cc index 6916b66a..7a5590ae 100644 --- a/mindspore-lite/tools/optimizer/format/to_nhwc_format.cc +++ b/mindspore-lite/tools/optimizer/format/to_nhwc_format.cc @@ -19,7 +19,7 @@ #include #include "ops_utils/op_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { int CheckKFormat(const PrimitivePtr &prim, const std::string &node_name) { @@ -77,4 +77,4 @@ STATUS ToNHWCFormat::DecideConvWeightSrcAndDstFormat(const CNodePtr &cnode, sche return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/format/to_nhwc_format.h b/mindspore-lite/tools/optimizer/format/to_nhwc_format.h index 01f9e9d2..63d435b6 100644 --- a/mindspore-lite/tools/optimizer/format/to_nhwc_format.h +++ b/mindspore-lite/tools/optimizer/format/to_nhwc_format.h @@ -19,7 +19,7 @@ #include "tools/optimizer/format/to_format_base.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ToNHWCFormat : public ToFormatBase { public: @@ -35,5 +35,5 @@ class ToNHWCFormat : public ToFormatBase { schema::Format *dst_format) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FORMAT_TO_NHWC_FORMAT_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/activation_fusion.cc b/mindspore-lite/tools/optimizer/fusion/activation_fusion.cc index 06be3948..2ce283c0 100644 --- a/mindspore-lite/tools/optimizer/fusion/activation_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/activation_fusion.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { STATUS DoFusion(CNodePtr cur_cnode, const CNodePtr &pre_cnode) { auto cur_act_prim = ops::GetOperator(cur_cnode->input(0)); @@ -111,4 +111,4 @@ bool ActivationFusion::Run(const FuncGraphPtr &func_graph) { return false; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/activation_fusion.h b/mindspore-lite/tools/optimizer/fusion/activation_fusion.h index fdfd37f0..9339ad7f 100644 --- a/mindspore-lite/tools/optimizer/fusion/activation_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/activation_fusion.h @@ -21,7 +21,7 @@ #include "tools/converter/converter_context.h" #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ActivationFusion : public Pass { public: @@ -30,5 +30,5 @@ class ActivationFusion : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_ACTIVATION_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/add_activation_fusion.cc b/mindspore-lite/tools/optimizer/fusion/add_activation_fusion.cc index da34ced9..af9c7ca8 100644 --- a/mindspore-lite/tools/optimizer/fusion/add_activation_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/add_activation_fusion.cc @@ -28,7 +28,7 @@ #include "tools/converter/quantizer/quant_param_holder.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef AddActivationFusion::DefinePattern() const { auto is_add = 
std::make_shared(IsSpecifiedNode<&prim::kPrimAddFusion>); auto is_act = std::make_shared(IsSpecifiedNode<&prim::kPrimActivation>); @@ -144,4 +144,4 @@ bool AddActivationFusion::CheckPattern(const FuncGraphPtr &func_graph, const CNo } return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/add_activation_fusion.h b/mindspore-lite/tools/optimizer/fusion/add_activation_fusion.h index 758d0121..dc65343f 100644 --- a/mindspore-lite/tools/optimizer/fusion/add_activation_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/add_activation_fusion.h @@ -24,7 +24,7 @@ #include "include/common/utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class AddActivationFusion : public LitePatternProcessPass { public: @@ -42,5 +42,5 @@ class AddActivationFusion : public LitePatternProcessPass { const std::set support_act_types) const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_ADD_ACTIVATION_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/add_concat_activation_fusion.cc b/mindspore-lite/tools/optimizer/fusion/add_concat_activation_fusion.cc index 1bd9fb8f..fc92b36f 100644 --- a/mindspore-lite/tools/optimizer/fusion/add_concat_activation_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/add_concat_activation_fusion.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef AddConcatActivationFusion::DefinePattern() const { auto is_act = std::make_shared(IsSpecifiedNode<&prim::kPrimActivation>); MS_CHECK_TRUE_RET(is_act != nullptr, {}); @@ -107,4 +107,4 @@ const AnfNodePtr AddConcatActivationFusion::Process(const FuncGraphPtr &func_gra return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/add_concat_activation_fusion.h b/mindspore-lite/tools/optimizer/fusion/add_concat_activation_fusion.h index fdf4c94e..ff0bb96b 100644 --- a/mindspore-lite/tools/optimizer/fusion/add_concat_activation_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/add_concat_activation_fusion.h @@ -20,7 +20,7 @@ #include #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class AddConcatActivationFusion : public LitePatternProcessPass { public: explicit AddConcatActivationFusion(bool multigraph = true, const std::string &name = "AddConcatActivationFusion") @@ -29,5 +29,5 @@ class AddConcatActivationFusion : public LitePatternProcessPass { const BaseRef DefinePattern() const override; const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_ADD_CONCAT_ACTIVATION_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/add_layernorm_fusion.cc b/mindspore-lite/tools/optimizer/fusion/add_layernorm_fusion.cc index 6b50a3cf..cf195d77 100644 --- a/mindspore-lite/tools/optimizer/fusion/add_layernorm_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/add_layernorm_fusion.cc @@ -41,7 +41,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" #include 
"mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr int kAxis = -1; @@ -389,4 +389,4 @@ const AnfNodePtr FuseAddAndLayernorm::Process(const FuncGraphPtr &graph, const A } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/add_layernorm_fusion.h b/mindspore-lite/tools/optimizer/fusion/add_layernorm_fusion.h index a270065e..dedb5346 100644 --- a/mindspore-lite/tools/optimizer/fusion/add_layernorm_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/add_layernorm_fusion.h @@ -27,7 +27,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "mindspore/ops/op_def/nn_optimizer_ops.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class LayerNormV3Fusion : public MultiplePatternProcessPass { public: @@ -83,6 +83,6 @@ class FuseAddAndLayernorm : public opt::LitePatternProcessPass { }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_ADD_LAYERNORM_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/add_stream_label_pass.cc b/mindspore-lite/tools/optimizer/fusion/add_stream_label_pass.cc index 5eb2f2fb..0216e0e5 100644 --- a/mindspore-lite/tools/optimizer/fusion/add_stream_label_pass.cc +++ b/mindspore-lite/tools/optimizer/fusion/add_stream_label_pass.cc @@ -23,7 +23,7 @@ #include "common/common.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kMaxLineCount = 100; @@ -182,4 +182,4 @@ bool AddStreamLabelPass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/add_stream_label_pass.h b/mindspore-lite/tools/optimizer/fusion/add_stream_label_pass.h index 0b025147..a671bd69 100644 --- a/mindspore-lite/tools/optimizer/fusion/add_stream_label_pass.h +++ b/mindspore-lite/tools/optimizer/fusion/add_stream_label_pass.h @@ -24,7 +24,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/converter/cxx_api/converter_para.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class AddStreamLabelPass : public Pass { public: @@ -45,6 +45,6 @@ class AddStreamLabelPass : public Pass { std::set all_node_names_ = {}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_ADJUST_MATMUL_PASS_H diff --git a/mindspore-lite/tools/optimizer/fusion/adjust_col2im_pass.cc b/mindspore-lite/tools/optimizer/fusion/adjust_col2im_pass.cc index 3de0ee78..1396c081 100644 --- a/mindspore-lite/tools/optimizer/fusion/adjust_col2im_pass.cc +++ b/mindspore-lite/tools/optimizer/fusion/adjust_col2im_pass.cc @@ -32,7 +32,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { STATUS AddConstInputToAttr(const CNodePtr &cnode, const size_t input_index, const std::string &arg_name, @@ -171,4 +171,4 @@ bool AdjustCol2imPass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/adjust_col2im_pass.h 
b/mindspore-lite/tools/optimizer/fusion/adjust_col2im_pass.h index 14012fa7..5725ba40 100644 --- a/mindspore-lite/tools/optimizer/fusion/adjust_col2im_pass.h +++ b/mindspore-lite/tools/optimizer/fusion/adjust_col2im_pass.h @@ -19,7 +19,7 @@ #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class AdjustCol2imPass : public Pass { public: @@ -29,6 +29,6 @@ class AdjustCol2imPass : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_ADJUST_COL2IM_PASS_H diff --git a/mindspore-lite/tools/optimizer/fusion/adjust_controlflow_pass.cc b/mindspore-lite/tools/optimizer/fusion/adjust_controlflow_pass.cc index 2bf99be8..d23fb13c 100644 --- a/mindspore-lite/tools/optimizer/fusion/adjust_controlflow_pass.cc +++ b/mindspore-lite/tools/optimizer/fusion/adjust_controlflow_pass.cc @@ -32,7 +32,7 @@ #include "tools/converter/export_model.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { int32_t AdjustControlflowPass::AdjustBranchs(const FuncGraphPtr &branch, const FuncGraphPtr &func_graph) { auto node_list = TopoSort(branch->get_return()); @@ -155,4 +155,4 @@ bool AdjustControlflowPass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/adjust_controlflow_pass.h b/mindspore-lite/tools/optimizer/fusion/adjust_controlflow_pass.h index 0b7fb367..0491c27d 100644 --- a/mindspore-lite/tools/optimizer/fusion/adjust_controlflow_pass.h +++ b/mindspore-lite/tools/optimizer/fusion/adjust_controlflow_pass.h @@ -19,7 +19,7 @@ #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class AdjustControlflowPass : public Pass { public: @@ -32,6 +32,6 @@ class AdjustControlflowPass : public Pass { int32_t AdjustControlflow(const CNodePtr &cnode, const FuncGraphPtr &func_graph); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_ADJUST_CONTROLFLOW_PASS_H diff --git a/mindspore-lite/tools/optimizer/fusion/adjust_matmul_pass.cc b/mindspore-lite/tools/optimizer/fusion/adjust_matmul_pass.cc index d03efa8e..fb7d3a94 100644 --- a/mindspore-lite/tools/optimizer/fusion/adjust_matmul_pass.cc +++ b/mindspore-lite/tools/optimizer/fusion/adjust_matmul_pass.cc @@ -31,7 +31,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr int32_t kShapeMinus_1 = -1; @@ -421,4 +421,4 @@ bool AdjustMatmulPass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/adjust_matmul_pass.h b/mindspore-lite/tools/optimizer/fusion/adjust_matmul_pass.h index 2a865b1f..513649b7 100644 --- a/mindspore-lite/tools/optimizer/fusion/adjust_matmul_pass.h +++ b/mindspore-lite/tools/optimizer/fusion/adjust_matmul_pass.h @@ -19,7 +19,7 @@ #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class AdjustMatmulPass : public Pass { public: @@ -28,6 +28,6 @@ class AdjustMatmulPass : public Pass { bool Run(const 
FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_ADJUST_MATMUL_PASS_H diff --git a/mindspore-lite/tools/optimizer/fusion/adjust_resize_dims_pass.cc b/mindspore-lite/tools/optimizer/fusion/adjust_resize_dims_pass.cc index e7563ff2..631b8bee 100644 --- a/mindspore-lite/tools/optimizer/fusion/adjust_resize_dims_pass.cc +++ b/mindspore-lite/tools/optimizer/fusion/adjust_resize_dims_pass.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr int32_t kShapeMinus_1 = -1; @@ -517,4 +517,4 @@ bool AdjustResizeDimsPass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/adjust_resize_dims_pass.h b/mindspore-lite/tools/optimizer/fusion/adjust_resize_dims_pass.h index d6536ded..ea8d14da 100644 --- a/mindspore-lite/tools/optimizer/fusion/adjust_resize_dims_pass.h +++ b/mindspore-lite/tools/optimizer/fusion/adjust_resize_dims_pass.h @@ -19,7 +19,7 @@ #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class AdjustResizeDimsPass : public Pass { public: @@ -28,6 +28,6 @@ class AdjustResizeDimsPass : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_ADJUST_RESIZE_DIMS_H diff --git a/mindspore-lite/tools/optimizer/fusion/affine_activation_fusion.cc b/mindspore-lite/tools/optimizer/fusion/affine_activation_fusion.cc index d627d93f..80bf004c 100644 --- a/mindspore-lite/tools/optimizer/fusion/affine_activation_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/affine_activation_fusion.cc @@ -25,7 +25,7 @@ #include "ops_utils/op_utils.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef AffineActivationFusion::DefinePattern() const { auto is_activation = std::make_shared(IsSpecifiedNode<&prim::kPrimActivation>); MS_CHECK_TRUE_RET(is_activation != nullptr, {}); @@ -85,4 +85,4 @@ const AnfNodePtr AffineActivationFusion::Process(const FuncGraphPtr &func_graph, return affine_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/affine_activation_fusion.h b/mindspore-lite/tools/optimizer/fusion/affine_activation_fusion.h index cad0726d..fa80def6 100644 --- a/mindspore-lite/tools/optimizer/fusion/affine_activation_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/affine_activation_fusion.h @@ -21,7 +21,7 @@ #include "schema/inner/model_generated.h" #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class AffineActivationFusion : public LitePatternProcessPass { public: @@ -32,5 +32,5 @@ class AffineActivationFusion : public LitePatternProcessPass { const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_AFFINE_ACTIVATION_FUSION_H_ diff --git 
a/mindspore-lite/tools/optimizer/fusion/affine_fusion.cc b/mindspore-lite/tools/optimizer/fusion/affine_fusion.cc index bb043793..353f7996 100644 --- a/mindspore-lite/tools/optimizer/fusion/affine_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/affine_fusion.cc @@ -30,7 +30,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { constexpr auto kInputWithBiasNum = 4; constexpr auto kInputBias = 3; const BaseRef AffineFusion::DefinePattern() const { @@ -130,4 +130,4 @@ const AnfNodePtr AffineFusion::Process(const FuncGraphPtr &func_graph, const Anf MS_LOG(INFO) << "splice + matmul fused to affine node: " << affine_node->fullname_with_scope() << "success."; return affine_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/affine_fusion.h b/mindspore-lite/tools/optimizer/fusion/affine_fusion.h index 317987c5..de1013df 100644 --- a/mindspore-lite/tools/optimizer/fusion/affine_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/affine_fusion.h @@ -21,7 +21,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "schema/inner/model_generated.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class AffineFusion : public LitePatternProcessPass { public: @@ -32,5 +32,5 @@ class AffineFusion : public LitePatternProcessPass { const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_AFFINE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/antiquant_add_mul_matmul_allreduce_fusion.cc b/mindspore-lite/tools/optimizer/fusion/antiquant_add_mul_matmul_allreduce_fusion.cc index eb96a84f..8bca60c3 100644 --- a/mindspore-lite/tools/optimizer/fusion/antiquant_add_mul_matmul_allreduce_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/antiquant_add_mul_matmul_allreduce_fusion.cc @@ -32,7 +32,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { std::unordered_map AntiquantAddMulMatMulAllReduceFusion::DefinePatterns() const { std::unordered_map patterns; patterns[kPatternNameAntiquantAddMulMatMulAllReduce] = DefineAntiquantAddMulMatMulAllReducePattern(); @@ -209,4 +209,4 @@ AnfNodePtr AntiquantAddMulMatMulAllReduceFusion::Process(const std::string &patt MS_LOG(INFO) << "MatMulAllReduce replace AntiquantAddMulMatMulAllReduce success"; return matmul_allreduce_cnode; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/antiquant_add_mul_matmul_allreduce_fusion.h b/mindspore-lite/tools/optimizer/fusion/antiquant_add_mul_matmul_allreduce_fusion.h index 15a4e33d..f535a0b9 100644 --- a/mindspore-lite/tools/optimizer/fusion/antiquant_add_mul_matmul_allreduce_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/antiquant_add_mul_matmul_allreduce_fusion.h @@ -22,7 +22,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class AntiquantAddMulMatMulAllReduceFusion : public MultiplePatternProcessPass { public: @@ -47,5 
+47,5 @@ class AntiquantAddMulMatMulAllReduceFusion : public MultiplePatternProcessPass { const std::string kAttrNameTransposeA = "transpose_a"; const std::string kAttrNameTransposeB = "transpose_b"; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_ANTIQUANT_ADD_MUL_MATMUL_ALLREDUCE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/batchmatmul_fusion.cc b/mindspore-lite/tools/optimizer/fusion/batchmatmul_fusion.cc index 5fba9935..cc8ffde2 100644 --- a/mindspore-lite/tools/optimizer/fusion/batchmatmul_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/batchmatmul_fusion.cc @@ -36,7 +36,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "ir/tensor_new.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr int64_t kFcRightInputDims = 3; constexpr float kFpPrecision = 1e-6; @@ -401,4 +401,4 @@ const AnfNodePtr BatchMatMulFusion::Process(const FuncGraphPtr &func_graph, cons MS_LOG(INFO) << "stack node:" << stack_cnode->fullname_with_scope() << " batchmatmul fusion success"; return matmul_cnode; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/batchmatmul_fusion.h b/mindspore-lite/tools/optimizer/fusion/batchmatmul_fusion.h index 4124ed03..d28bde16 100644 --- a/mindspore-lite/tools/optimizer/fusion/batchmatmul_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/batchmatmul_fusion.h @@ -20,7 +20,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "tools/converter/converter_context.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class BatchMatMulFusion : public LitePatternProcessPass { public: @@ -34,5 +34,5 @@ class BatchMatMulFusion : public LitePatternProcessPass { const CNodePtr &left_slice_cnode) const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_BATCHMATMUL_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/batchnorm_to_scale_fusion.cc b/mindspore-lite/tools/optimizer/fusion/batchnorm_to_scale_fusion.cc index a9cff7e3..2aa9cf28 100644 --- a/mindspore-lite/tools/optimizer/fusion/batchnorm_to_scale_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/batchnorm_to_scale_fusion.cc @@ -33,7 +33,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr float kEps = 1e-8; constexpr float kDefaultEps = 1e-5; @@ -353,4 +353,4 @@ bool BatchNormToScaleFusion::Run(const FuncGraphPtr &func_graph) { } return false; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/batchnorm_to_scale_fusion.h b/mindspore-lite/tools/optimizer/fusion/batchnorm_to_scale_fusion.h index ed197210..23877428 100644 --- a/mindspore-lite/tools/optimizer/fusion/batchnorm_to_scale_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/batchnorm_to_scale_fusion.h @@ -20,7 +20,7 @@ #include #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class BatchNormToScaleFusion : public Pass { public: BatchNormToScaleFusion() : Pass("BatchNormToScaleFusion") {} @@ -35,5 +35,5 @@ class BatchNormToScaleFusion : public Pass { }; int CalculateScaleAndBiasFromBN(const CNodePtr &bn_node, int kernel_num, 
float *trans_scale, float *trans_bias); -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_BATCHNORM_TO_SCALE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/cast_fusion.cc b/mindspore-lite/tools/optimizer/fusion/cast_fusion.cc index c9bec643..59a718d3 100644 --- a/mindspore-lite/tools/optimizer/fusion/cast_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/cast_fusion.cc @@ -33,7 +33,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { bool IsGoodCastSplitFusion(const FuncGraphPtr &func_graph, const CNodePtr &split_cnode_2) { auto manager = func_graph->manager(); @@ -355,4 +355,4 @@ AnfNodePtr CastFusionPass::Process(const std::string &pattern_name, const mindsp } return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/cast_fusion.h b/mindspore-lite/tools/optimizer/fusion/cast_fusion.h index c423ec94..8f073e96 100644 --- a/mindspore-lite/tools/optimizer/fusion/cast_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/cast_fusion.h @@ -22,7 +22,7 @@ #include "include/backend/optimizer/optimizer.h" #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class CastFusionPass : public MultiplePatternProcessPass { public: @@ -47,5 +47,5 @@ class CastFusionPass : public MultiplePatternProcessPass { AnfNodePtr CastCastFusion(const FuncGraphPtr &func_graph, const mindspore::AnfNodePtr &node) const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TRANSPOSE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/concat_concat_fusion.cc b/mindspore-lite/tools/optimizer/fusion/concat_concat_fusion.cc index 7511906f..be71ae73 100644 --- a/mindspore-lite/tools/optimizer/fusion/concat_concat_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/concat_concat_fusion.cc @@ -23,7 +23,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool ConcatConcatFusion::Run(const FuncGraphPtr &func_graph) { MS_ASSERT(func_graph != nullptr); @@ -94,4 +94,4 @@ int ConcatConcatFusion::Process(const FuncGraphPtr &func_graph, const CNodePtr & return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/concat_concat_fusion.h b/mindspore-lite/tools/optimizer/fusion/concat_concat_fusion.h index 15b84886..eabd46b0 100644 --- a/mindspore-lite/tools/optimizer/fusion/concat_concat_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/concat_concat_fusion.h @@ -19,7 +19,7 @@ #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ConcatConcatFusion : public Pass { public: @@ -31,6 +31,6 @@ class ConcatConcatFusion : public Pass { int Process(const FuncGraphPtr &func_graph, const CNodePtr &cnode); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_CONCAT_CONCAT_FUSION_H diff --git a/mindspore-lite/tools/optimizer/fusion/conv_activation_fusion.cc 
b/mindspore-lite/tools/optimizer/fusion/conv_activation_fusion.cc index 730d4563..f92ae494 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_activation_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/conv_activation_fusion.cc @@ -27,7 +27,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef ConvActivationFusion::DefinePattern() const { auto is_conv = std::make_shared(IsConvNode); MS_CHECK_TRUE_RET(is_conv != nullptr, {}); @@ -86,4 +86,4 @@ const AnfNodePtr ConvActivationFusion::Process(const FuncGraphPtr &func_graph, c } return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/conv_activation_fusion.h b/mindspore-lite/tools/optimizer/fusion/conv_activation_fusion.h index 611b3b0f..a1a3e04e 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_activation_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/conv_activation_fusion.h @@ -22,7 +22,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "tools/converter/cxx_api/converter_para.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ConvActivationFusion : public LitePatternProcessPass { public: @@ -40,5 +40,5 @@ class ConvActivationFusion : public LitePatternProcessPass { const std::shared_ptr param_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_CONV_ACTIVATION_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/conv_biasadd_fusion.cc b/mindspore-lite/tools/optimizer/fusion/conv_biasadd_fusion.cc index da78af0a..eef16916 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_biasadd_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/conv_biasadd_fusion.cc @@ -30,7 +30,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_b.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kAddWeightIndex = 2; bool IsConvExtendNode(const BaseRef &n) { @@ -236,4 +236,4 @@ const AnfNodePtr ConvBiasaddFusion::Process(const FuncGraphPtr &func_graph, cons MS_ASSERT(add_cnode != nullptr); return add_cnode->input(1); } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/conv_biasadd_fusion.h b/mindspore-lite/tools/optimizer/fusion/conv_biasadd_fusion.h index b806bd54..8724ae5f 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_biasadd_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/conv_biasadd_fusion.h @@ -20,7 +20,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "tools/converter/converter_context.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ConvBiasaddFusion : public LitePatternProcessPass { public: @@ -35,5 +35,5 @@ class ConvBiasaddFusion : public LitePatternProcessPass { CNodePtr GetAddCnode(const AnfNodePtr &node) const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_CONV_BIASADD_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/conv_bn_fusion.cc b/mindspore-lite/tools/optimizer/fusion/conv_bn_fusion.cc index f9cc8268..d9c34493 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_bn_fusion.cc +++ 
b/mindspore-lite/tools/optimizer/fusion/conv_bn_fusion.cc @@ -24,7 +24,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_b.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_f.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { bool IsBatchNode(const BaseRef &n) { if (utils::isa(n)) { @@ -55,4 +55,4 @@ int ConvBatchNormFusion::InitTransParam(const CNodePtr &bn_node, int kernel_num, auto ret = CalculateScaleAndBiasFromBN(bn_node, kernel_num, trans_scale, trans_bias); return ret; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/conv_bn_fusion.h b/mindspore-lite/tools/optimizer/fusion/conv_bn_fusion.h index 229c5655..a56d5347 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_bn_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/conv_bn_fusion.h @@ -19,7 +19,7 @@ #include "tools/optimizer/fusion/conv_transform_fusion.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class ConvBatchNormFusion : public ConvTransformFusion { public: explicit ConvBatchNormFusion(FmkType fmk_type = converter::kFmkTypeMs, bool multigraph = true) @@ -32,5 +32,5 @@ class ConvBatchNormFusion : public ConvTransformFusion { private: int InitTransParam(const CNodePtr &, int, float *, float *) const override; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_CONV_BN_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/conv_conv_fusion.cc b/mindspore-lite/tools/optimizer/fusion/conv_conv_fusion.cc index 29d9d2d8..0485ddb2 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_conv_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/conv_conv_fusion.cc @@ -26,7 +26,7 @@ #include "ops_utils/op_utils.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kConvNoBiasLen = 3; constexpr size_t kConvWithBiasLen = 4; @@ -322,4 +322,4 @@ const AnfNodePtr ConvConvFusion::Process(const FuncGraphPtr &func_graph, const A (void)ReplaceParametersAndNodes(func_graph, up_conv_cnode, down_conv_cnode); return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/conv_conv_fusion.h b/mindspore-lite/tools/optimizer/fusion/conv_conv_fusion.h index f9ac6419..a08d23dd 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_conv_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/conv_conv_fusion.h @@ -21,7 +21,7 @@ #include "schema/inner/model_generated.h" #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ConvConvFusion : public LitePatternProcessPass { public: @@ -34,5 +34,5 @@ class ConvConvFusion : public LitePatternProcessPass { bool CheckCanFusion(const CNodePtr &up_conv_cnode, const CNodePtr &down_conv_cnode) const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_CONV_CONV_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/conv_pad_fusion.cc b/mindspore-lite/tools/optimizer/fusion/conv_pad_fusion.cc index 0851cab2..ebbaf080 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_pad_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/conv_pad_fusion.cc @@ -33,7 +33,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_p.h" #include 
"mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kPadInputsLength = 3; @@ -286,4 +286,4 @@ AnfNodePtr ConvPadFusion::Process(const std::string &pattern_name, const FuncGra return nullptr; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/conv_pad_fusion.h b/mindspore-lite/tools/optimizer/fusion/conv_pad_fusion.h index f8f9f7ca..db4ebbb8 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_pad_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/conv_pad_fusion.h @@ -22,7 +22,7 @@ #include "include/backend/optimizer/optimizer.h" #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ConvPadFusion : public MultiplePatternProcessPass { public: @@ -38,5 +38,5 @@ class ConvPadFusion : public MultiplePatternProcessPass { VectorRef DefinePadTransposeConvPattern() const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_CONV_PAD_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/conv_scale_fusion.cc b/mindspore-lite/tools/optimizer/fusion/conv_scale_fusion.cc index ea98d48a..dae43755 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_scale_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/conv_scale_fusion.cc @@ -23,7 +23,7 @@ #include "nnacl_c/op_base.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kScaleWeightIndex = 2; constexpr size_t kScaleBiasIndex = 3; @@ -97,4 +97,4 @@ int ConvScaleFusion::InitTransParam(const CNodePtr &scale_node, int kernel_num, } return lite::RET_OK; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/conv_scale_fusion.h b/mindspore-lite/tools/optimizer/fusion/conv_scale_fusion.h index d2c4012e..60b5d08a 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_scale_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/conv_scale_fusion.h @@ -19,7 +19,7 @@ #include "tools/optimizer/fusion/conv_transform_fusion.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class ConvScaleFusion : public ConvTransformFusion { public: explicit ConvScaleFusion(FmkType fmk_type = converter::kFmkTypeMs, bool multigraph = true) @@ -32,5 +32,5 @@ class ConvScaleFusion : public ConvTransformFusion { private: int InitTransParam(const CNodePtr &, int, float *, float *) const override; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_CONV_SCALE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/conv_transform_fusion.cc b/mindspore-lite/tools/optimizer/fusion/conv_transform_fusion.cc index cc57a7fc..92d3bb65 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_transform_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/conv_transform_fusion.cc @@ -30,7 +30,7 @@ #include "ops_utils/op_utils.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kConvWeightIndex = 2; constexpr size_t kConvBiasIndex = 3; @@ -450,4 +450,4 @@ bool ConvTransformFusion::CheckCanFused(const FuncGraphPtr &func_graph, const CN return is_value_node || conv_weight_param != 
nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/conv_transform_fusion.h b/mindspore-lite/tools/optimizer/fusion/conv_transform_fusion.h index a764bb89..b313feb0 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_transform_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/conv_transform_fusion.h @@ -22,7 +22,7 @@ #include "include/registry/converter_context.h" using mindspore::converter::FmkType; -namespace mindspore::opt { +namespace mindspore::lite::opt { class ConvTransformFusion : public LitePatternProcessPass { public: explicit ConvTransformFusion(bool multigraph = true, const std::string &name = "ConvTransformFusion") @@ -45,5 +45,5 @@ class ConvTransformFusion : public LitePatternProcessPass { FmkType fmk_type_ = converter::kFmkTypeTf; bool nchw_format_ = false; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_CONV_TRANSFORM_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc b/mindspore-lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc index c9410488..cd526b49 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef ConvTupleActivationFusion::DefinePattern() const { auto is_conv = std::make_shared(IsConvNode); MS_CHECK_TRUE_RET(is_conv != nullptr, {}); @@ -99,4 +99,4 @@ const AnfNodePtr ConvTupleActivationFusion::Process(const FuncGraphPtr &func_gra } return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h b/mindspore-lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h index e2ebd675..fa5c6b38 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h @@ -20,7 +20,7 @@ #include #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ConvTupleActivationFusion : public LitePatternProcessPass { public: @@ -31,5 +31,5 @@ class ConvTupleActivationFusion : public LitePatternProcessPass { const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_CONV_TUPLE_ACTIVATION_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.cc b/mindspore-lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.cc index 3e9ffd3d..b46c7b47 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.cc @@ -22,7 +22,7 @@ #include "nnacl_c/op_base.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef ConvTupleGetItemFusion::DefinePattern() const { auto is_tuple_getitem = std::make_shared(IsSpecifiedNode<&prim::kPrimTupleGetItem>); MS_CHECK_TRUE_RET(is_tuple_getitem != nullptr, {}); @@ -73,4 +73,4 @@ const 
AnfNodePtr ConvTupleGetItemFusion::Process(const FuncGraphPtr &func_graph, } return conv_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.h b/mindspore-lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.h index 45ca1d4d..235ea318 100644 --- a/mindspore-lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.h @@ -18,7 +18,7 @@ #include #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class ConvTupleGetItemFusion : public LitePatternProcessPass { public: explicit ConvTupleGetItemFusion(const std::string &name = "ConvTupleGetItemFusion", bool multigraph = true) @@ -27,6 +27,6 @@ class ConvTupleGetItemFusion : public LitePatternProcessPass { const BaseRef DefinePattern() const override; const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_CONV_TUPLEGETITEM_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/decoder_layer_fusion.cc b/mindspore-lite/tools/optimizer/fusion/decoder_layer_fusion.cc index 24c7fd96..e0e2e679 100644 --- a/mindspore-lite/tools/optimizer/fusion/decoder_layer_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/decoder_layer_fusion.cc @@ -37,7 +37,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { const auto &p1 = std::placeholders::_1; } // namespace @@ -524,4 +524,4 @@ CNodePtr DecoderLayerFusion::CreateMaskedDecoderLayerFusionNode(const FuncGraphP new_node->set_fullname_with_scope(node->fullname_with_scope() + "/decoder_layer"); return new_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/decoder_layer_fusion.h b/mindspore-lite/tools/optimizer/fusion/decoder_layer_fusion.h index bb631cf4..0bc71684 100644 --- a/mindspore-lite/tools/optimizer/fusion/decoder_layer_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/decoder_layer_fusion.h @@ -28,7 +28,7 @@ #include "infer/cxx_api/activation.h" #include "tools/optimizer/fusion/multi_head_attention_fusion.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class DecoderLayerFusion : public MultiplePatternProcessPass { public: @@ -117,5 +117,5 @@ class DecoderLayerFusion : public MultiplePatternProcessPass { mutable bool layer_norm_{false}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_DECODER_LAYER_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/encoder_layer_fusion.cc b/mindspore-lite/tools/optimizer/fusion/encoder_layer_fusion.cc index 9d1dfc51..6be2729b 100644 --- a/mindspore-lite/tools/optimizer/fusion/encoder_layer_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/encoder_layer_fusion.cc @@ -47,7 +47,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { const auto &p1 = std::placeholders::_1; } // namespace @@ -1156,4 +1156,4 @@ CNodePtr 
EncoderLayerFusion::CreateMaskedEncoderLayerFusionNode(const FuncGraphP new_node->set_fullname_with_scope(node->fullname_with_scope() + "/encoder_layer"); return new_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/encoder_layer_fusion.h b/mindspore-lite/tools/optimizer/fusion/encoder_layer_fusion.h index 83483217..7d057f38 100644 --- a/mindspore-lite/tools/optimizer/fusion/encoder_layer_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/encoder_layer_fusion.h @@ -28,7 +28,7 @@ #include "infer/cxx_api/layer_norm_fusion.h" #include "infer/cxx_api/activation.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class EncoderLayerFusion : public MultiplePatternProcessPass { public: @@ -198,5 +198,5 @@ class EncoderLayerFusion : public MultiplePatternProcessPass { mutable bool embedding_layer_{false}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_ENCODER_LAYER_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/expanddims_reshape_fusion.cc b/mindspore-lite/tools/optimizer/fusion/expanddims_reshape_fusion.cc index 455487af..fa33a975 100644 --- a/mindspore-lite/tools/optimizer/fusion/expanddims_reshape_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/expanddims_reshape_fusion.cc @@ -25,7 +25,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_e.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef ExpandDimsReshapeFusion::DefinePattern() const { auto is_reshape = std::make_shared(IsSpecifiedNode<&prim::kPrimReshape>); MS_CHECK_TRUE_RET(is_reshape != nullptr, {}); @@ -72,4 +72,4 @@ const AnfNodePtr ExpandDimsReshapeFusion::Process(const FuncGraphPtr &func_graph manage->SetEdge(reshape_cnode, C1NUM, expanddims_cnode->input(SECOND_INPUT)); return reshape_cnode; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/expanddims_reshape_fusion.h b/mindspore-lite/tools/optimizer/fusion/expanddims_reshape_fusion.h index eb04fb1c..550cc825 100644 --- a/mindspore-lite/tools/optimizer/fusion/expanddims_reshape_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/expanddims_reshape_fusion.h @@ -22,7 +22,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "utils/check_convert_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ExpandDimsReshapeFusion : public LitePatternProcessPass { public: @@ -36,5 +36,5 @@ class ExpandDimsReshapeFusion : public LitePatternProcessPass { bool CheckCanFuse(const FuncGraphPtr &func_graph, const AnfNodePtr &node) const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_EXPANDDIMS_RESHAPE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/ffn_antiquant_fusion.cc b/mindspore-lite/tools/optimizer/fusion/ffn_antiquant_fusion.cc index eaf58248..28b204f1 100644 --- a/mindspore-lite/tools/optimizer/fusion/ffn_antiquant_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/ffn_antiquant_fusion.cc @@ -30,7 +30,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr size_t kFFNAntiquantScaleInputIndex = 10; constexpr size_t 
kFFNWeight1InputIndex = 1; @@ -325,4 +325,4 @@ int FFNAntiquantFusion::Process(const FuncGraphPtr &func_graph, const CNodePtr & return RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/ffn_antiquant_fusion.h b/mindspore-lite/tools/optimizer/fusion/ffn_antiquant_fusion.h index 1eb7aa37..af9ec38d 100644 --- a/mindspore-lite/tools/optimizer/fusion/ffn_antiquant_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/ffn_antiquant_fusion.h @@ -20,7 +20,7 @@ #include #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class FFNAntiquantFusion : public Pass { public: @@ -39,6 +39,6 @@ class FFNAntiquantFusion : public Pass { std::vector GetScaleZpInStragety(std::vector weight_in_strategy); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_FFN_ANTIQUANT_FUSION_H diff --git a/mindspore-lite/tools/optimizer/fusion/ffn_custom_pass.cc b/mindspore-lite/tools/optimizer/fusion/ffn_custom_pass.cc index 94bcd429..70cad575 100644 --- a/mindspore-lite/tools/optimizer/fusion/ffn_custom_pass.cc +++ b/mindspore-lite/tools/optimizer/fusion/ffn_custom_pass.cc @@ -31,7 +31,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr auto kNameFFNNameConf = "FFNCust"; constexpr auto kNameFFNPatternForSD = "FFNCustPatternForSD"; @@ -418,4 +418,4 @@ AnfNodePtr FFNCustomPass::Process(const std::string &patten_name, const FuncGrap return cnode; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/ffn_custom_pass.h b/mindspore-lite/tools/optimizer/fusion/ffn_custom_pass.h index 4caa14ca..0299abf2 100644 --- a/mindspore-lite/tools/optimizer/fusion/ffn_custom_pass.h +++ b/mindspore-lite/tools/optimizer/fusion/ffn_custom_pass.h @@ -24,7 +24,7 @@ #include #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class FFNCustomPass : public MultiplePatternProcessPass { public: @@ -48,5 +48,5 @@ class FFNCustomPass : public MultiplePatternProcessPass { op_attrs_type op_attrs_map_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_FFN_CUSTOM_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/ffn_fusion.cc b/mindspore-lite/tools/optimizer/fusion/ffn_fusion.cc index 91cdb1cc..83e29e16 100644 --- a/mindspore-lite/tools/optimizer/fusion/ffn_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/ffn_fusion.cc @@ -30,7 +30,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr int kStructureNum = 2; @@ -311,4 +311,4 @@ AnfNodePtr FFNFusion::Process(const std::string &pattern_name, const mindspore:: return cnode; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/ffn_fusion.h b/mindspore-lite/tools/optimizer/fusion/ffn_fusion.h index 22175fa8..fb195358 100644 --- a/mindspore-lite/tools/optimizer/fusion/ffn_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/ffn_fusion.h @@ 
-23,7 +23,7 @@ #include #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { enum PatternType { kDynamicDims, @@ -69,6 +69,6 @@ class FFNFusion : public MultiplePatternProcessPass { mutable VarPtr matmul2_b_[kMaxPatternNum] = {nullptr}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_FFN_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/flash_attention_antiquant_fusion.cc b/mindspore-lite/tools/optimizer/fusion/flash_attention_antiquant_fusion.cc index c77bf63a..8b4e3591 100644 --- a/mindspore-lite/tools/optimizer/fusion/flash_attention_antiquant_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/flash_attention_antiquant_fusion.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_i.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace { constexpr size_t kFlashAttentionAntiquantScaleInputIndex = 11; } // namespace @@ -230,4 +230,4 @@ int FlashAttentionAntiquantFusion::Process(const FuncGraphPtr &func_graph, const return RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/flash_attention_antiquant_fusion.h b/mindspore-lite/tools/optimizer/fusion/flash_attention_antiquant_fusion.h index 6f5ed2f5..3812dc0e 100644 --- a/mindspore-lite/tools/optimizer/fusion/flash_attention_antiquant_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/flash_attention_antiquant_fusion.h @@ -20,7 +20,7 @@ #include #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class FlashAttentionAntiquantFusion : public Pass { public: @@ -36,6 +36,6 @@ class FlashAttentionAntiquantFusion : public Pass { const ParameterPtr param_node_2, std::string name); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_FFN_ANTIQUANT_FUSION_H diff --git a/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion.cc b/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion.cc index d06afc0c..c771530a 100644 --- a/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion.cc @@ -40,7 +40,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { static int kNameIndex = 0; constexpr auto kNameFlashAttentionPatternForMsSD21 = "FlashAttentionPatternForMsSD21"; @@ -3032,4 +3032,4 @@ AnfNodePtr FlashAttentionFusion::Process(const std::string &patten_name, const F << flash_attention_node->fullname_with_scope(); return flash_attention_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion.h b/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion.h index 6bf5696f..13e20f0a 100644 --- a/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion.h @@ -24,7 +24,7 @@ #include #include "tools/optimizer/common/multiple_pattern_process_pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { struct 
FlashAttentionParm { string input_layout = "BNSD"; @@ -229,5 +229,5 @@ class FlashAttentionFusion : public MultiplePatternProcessPass { static std::string soc_version_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_FLASH_ATTENTION_BASE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion_for_custom.cc b/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion_for_custom.cc index fbcdb17b..a7892438 100644 --- a/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion_for_custom.cc +++ b/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion_for_custom.cc @@ -34,7 +34,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kNumInputSize1 = 1; constexpr size_t kNumInputSize2 = 2; @@ -446,4 +446,4 @@ AnfNodePtr FlashAttentionFusionForCustom::Process(const std::string &patten_name MS_LOG(INFO) << "Flash attention fusion success."; return cnode; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion_for_custom.h b/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion_for_custom.h index 1094c4e9..ae6a7c31 100644 --- a/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion_for_custom.h +++ b/mindspore-lite/tools/optimizer/fusion/flash_attention_fusion_for_custom.h @@ -23,7 +23,7 @@ #include #include #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { /* * @@ -84,5 +84,5 @@ class FlashAttentionFusionForCustom : public MultiplePatternProcessPass { std::map> disable_pattern_names_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_FLASH_ATTENTION_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/flash_attention_tik_fusion.cc b/mindspore-lite/tools/optimizer/fusion/flash_attention_tik_fusion.cc index 634b7a89..95287785 100644 --- a/mindspore-lite/tools/optimizer/fusion/flash_attention_tik_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/flash_attention_tik_fusion.cc @@ -23,7 +23,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_p.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kInputIndex1 = 1; @@ -148,4 +148,4 @@ bool FlashAttentionTikPass::Run(const FuncGraphPtr &func_graph) { } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/flash_attention_tik_fusion.h b/mindspore-lite/tools/optimizer/fusion/flash_attention_tik_fusion.h index 1afc4eb2..1199b726 100644 --- a/mindspore-lite/tools/optimizer/fusion/flash_attention_tik_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/flash_attention_tik_fusion.h @@ -24,7 +24,7 @@ #include "tools/optimizer/common/format_utils.h" using mindspore::converter::FmkType; -namespace mindspore { +namespace mindspore::lite { namespace opt { class FlashAttentionTikPass : public Pass { public: @@ -33,6 +33,6 @@ class FlashAttentionTikPass : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // 
MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_FATik_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/fullconnected_add_fusion.cc b/mindspore-lite/tools/optimizer/fusion/fullconnected_add_fusion.cc index 614be3ad..fb214598 100644 --- a/mindspore-lite/tools/optimizer/fusion/fullconnected_add_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/fullconnected_add_fusion.cc @@ -29,7 +29,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_b.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_f.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { bool IsPrimitiveProper(const CNodePtr &add_cnode, const CNodePtr &fc_cnode, int index) { @@ -202,4 +202,4 @@ AnfNodePtr FullconnectedAddFusion::Process(const std::string &pattern_name, cons return nullptr; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/fullconnected_add_fusion.h b/mindspore-lite/tools/optimizer/fusion/fullconnected_add_fusion.h index 87abb014..3f2d77ae 100644 --- a/mindspore-lite/tools/optimizer/fusion/fullconnected_add_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/fullconnected_add_fusion.h @@ -21,7 +21,7 @@ #include #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class FullconnectedAddFusion : public MultiplePatternProcessPass { public: explicit FullconnectedAddFusion(const std::string &name = "FullconnectedAddFusion", bool multigraph = true) @@ -35,5 +35,5 @@ class FullconnectedAddFusion : public MultiplePatternProcessPass { VectorRef DefineFcAddFusionPattern() const; VectorRef DefineFcBiasAddPattern() const; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_FULLCONNECTED_ADD_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/fullconnected_fusion.cc b/mindspore-lite/tools/optimizer/fusion/fullconnected_fusion.cc index e31920fc..52e45c17 100644 --- a/mindspore-lite/tools/optimizer/fusion/fullconnected_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/fullconnected_fusion.cc @@ -27,7 +27,7 @@ #include "ops_utils/op_utils.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_f.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kFcWeightIndex = 2; constexpr size_t kFcParameterDims = 2; @@ -284,4 +284,4 @@ const AnfNodePtr FullConnectedFusion::Process(const FuncGraphPtr &func_graph, co MS_LOG(INFO) << curr_cnode->fullname_with_scope() << " fusion success"; return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/fullconnected_fusion.h b/mindspore-lite/tools/optimizer/fusion/fullconnected_fusion.h index 29d52655..a49ad2ea 100644 --- a/mindspore-lite/tools/optimizer/fusion/fullconnected_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/fullconnected_fusion.h @@ -22,7 +22,7 @@ #include "tools/optimizer/fusion/conv_transform_fusion.h" #include "tools/optimizer/common/multiple_pattern_process_pass.h" #include "schema/inner/model_generated.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class FullConnectedFusion : public LitePatternProcessPass { public: explicit FullConnectedFusion(bool multigraph = true) : LitePatternProcessPass("FullConnectedFusion", multigraph) {} @@ -30,5 +30,5 @@ class FullConnectedFusion : public LitePatternProcessPass { const BaseRef DefinePattern() const override; 
const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_FULLCONNECTED_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/gegluv2_fusion.cc b/mindspore-lite/tools/optimizer/fusion/gegluv2_fusion.cc index 7f7f93e3..bb965a72 100644 --- a/mindspore-lite/tools/optimizer/fusion/gegluv2_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/gegluv2_fusion.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr auto kNameGeGluV2Pattern = "GeGluV2Pattern"; @@ -147,4 +147,4 @@ AnfNodePtr GeGluV2Fusion::Process(const std::string &patten_name, const FuncGrap return GeGluV2_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/gegluv2_fusion.h b/mindspore-lite/tools/optimizer/fusion/gegluv2_fusion.h index e962810b..a111479f 100644 --- a/mindspore-lite/tools/optimizer/fusion/gegluv2_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/gegluv2_fusion.h @@ -23,7 +23,7 @@ #include #include "tools/optimizer/common/multiple_pattern_process_pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class GeGluV2Fusion : public MultiplePatternProcessPass { public: @@ -61,5 +61,5 @@ class GeGluV2Fusion : public MultiplePatternProcessPass { const VectorRef DefineGeGluV2Pattern() const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_GEGLUV2_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/gelu_fusion.cc b/mindspore-lite/tools/optimizer/fusion/gelu_fusion.cc index 92b0f7f7..753cbc05 100644 --- a/mindspore-lite/tools/optimizer/fusion/gelu_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/gelu_fusion.cc @@ -23,7 +23,7 @@ #include "tools/optimizer/common/gllo_utils.h" #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { CNodePtr GeLUFusion::CreateGeLUNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, const EquivPtr &equiv) const { @@ -91,4 +91,4 @@ AnfNodePtr GeLUFusion::Process(const std::string &pattern_name, const mindspore: return cnode; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/gelu_fusion.h b/mindspore-lite/tools/optimizer/fusion/gelu_fusion.h index ae0ef979..f248fd5f 100644 --- a/mindspore-lite/tools/optimizer/fusion/gelu_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/gelu_fusion.h @@ -23,7 +23,7 @@ #include "tools/optimizer/common/gllo_utils.h" #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class GeLUFusion : public MultiplePatternProcessPass { public: @@ -46,6 +46,6 @@ class GeLUFusion : public MultiplePatternProcessPass { mutable bool approximate_{false}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_GELU_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/glu_fusion.cc b/mindspore-lite/tools/optimizer/fusion/glu_fusion.cc index 2e4dcddb..3c9c1cd0 100644 --- 
a/mindspore-lite/tools/optimizer/fusion/glu_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/glu_fusion.cc @@ -29,7 +29,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { CNodePtr GLUFusion::CreateGLUNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, const EquivPtr &equiv) const { MS_ASSERT(func_graph != nullptr && node != nullptr && equiv != nullptr); @@ -108,4 +108,4 @@ const AnfNodePtr GLUFusion::Process(const FuncGraphPtr &func_graph, const AnfNod return cnode; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/glu_fusion.h b/mindspore-lite/tools/optimizer/fusion/glu_fusion.h index 5e6a7e79..143ada0a 100644 --- a/mindspore-lite/tools/optimizer/fusion/glu_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/glu_fusion.h @@ -22,7 +22,7 @@ #include "include/common/utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class GLUFusion : public LitePatternProcessPass { public: @@ -44,6 +44,6 @@ class GLUFusion : public LitePatternProcessPass { mutable VarPtr split_prim_ = nullptr; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_GLU_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/gnbmm_pass.cc b/mindspore-lite/tools/optimizer/fusion/gnbmm_pass.cc index 3beed847..002ae387 100644 --- a/mindspore-lite/tools/optimizer/fusion/gnbmm_pass.cc +++ b/mindspore-lite/tools/optimizer/fusion/gnbmm_pass.cc @@ -29,7 +29,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr auto kNameGNBMMPatternForSDXL = "GNBMMPatternForSDXL"; @@ -309,4 +309,4 @@ AnfNodePtr GNBMMPass::Process(const std::string &patten_name, const FuncGraphPtr return gnbmm_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/gnbmm_pass.h b/mindspore-lite/tools/optimizer/fusion/gnbmm_pass.h index a5603d4c..0ebbb66e 100644 --- a/mindspore-lite/tools/optimizer/fusion/gnbmm_pass.h +++ b/mindspore-lite/tools/optimizer/fusion/gnbmm_pass.h @@ -24,7 +24,7 @@ #include "tools/optimizer/common/multiple_pattern_process_pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class GNBMMPass : public MultiplePatternProcessPass { @@ -44,5 +44,5 @@ class GNBMMPass : public MultiplePatternProcessPass { }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_GNBMM_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/gnsnz_pass.cc b/mindspore-lite/tools/optimizer/fusion/gnsnz_pass.cc index 86f4d8bb..3357a369 100644 --- a/mindspore-lite/tools/optimizer/fusion/gnsnz_pass.cc +++ b/mindspore-lite/tools/optimizer/fusion/gnsnz_pass.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr auto kNameGNSNZPatternForSD15 = "GNSNZPatternForSD15"; constexpr auto kNameGNSNZPatternForSD15WithoutSilu 
= "GNSNZPatternForSD15WithoutSilu"; @@ -301,4 +301,4 @@ AnfNodePtr GNSNZPass::Process(const std::string &patten_name, const FuncGraphPtr return groupnormsilu_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/gnsnz_pass.h b/mindspore-lite/tools/optimizer/fusion/gnsnz_pass.h index 0306df61..cc31fe3e 100644 --- a/mindspore-lite/tools/optimizer/fusion/gnsnz_pass.h +++ b/mindspore-lite/tools/optimizer/fusion/gnsnz_pass.h @@ -24,7 +24,7 @@ #include "tools/optimizer/common/multiple_pattern_process_pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class GNSNZPass : public MultiplePatternProcessPass { public: @@ -38,5 +38,5 @@ class GNSNZPass : public MultiplePatternProcessPass { AnfNodePtr Process(const std::string &, const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_GNSNZ_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/graph_split_pass.cc b/mindspore-lite/tools/optimizer/fusion/graph_split_pass.cc index 6ac2cab1..38df133f 100644 --- a/mindspore-lite/tools/optimizer/fusion/graph_split_pass.cc +++ b/mindspore-lite/tools/optimizer/fusion/graph_split_pass.cc @@ -26,7 +26,7 @@ #include "infer/make_tuple.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kTargetNodeSize = 2; } @@ -690,4 +690,4 @@ bool GraphSplitPass::Run(const FuncGraphPtr &original_graph) { original_graph->set_attr("subgraphs", MakeValue>(subgraphs)); return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/graph_split_pass.h b/mindspore-lite/tools/optimizer/fusion/graph_split_pass.h index ceaca68a..129dbfbc 100644 --- a/mindspore-lite/tools/optimizer/fusion/graph_split_pass.h +++ b/mindspore-lite/tools/optimizer/fusion/graph_split_pass.h @@ -24,7 +24,7 @@ #include "tools/optimizer/common/gllo_utils.h" #include "tools/converter/cxx_api/converter_para.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class GraphSplitPass : public Pass { public: @@ -36,5 +36,5 @@ class GraphSplitPass : public Pass { std::shared_ptr param_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_GRAPH_SPLIT_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/groupnorm_fusion.cc b/mindspore-lite/tools/optimizer/fusion/groupnorm_fusion.cc index 295d33cb..0c1e8fb8 100644 --- a/mindspore-lite/tools/optimizer/fusion/groupnorm_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/groupnorm_fusion.cc @@ -34,7 +34,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { STATUS GetAxis(const BaseRef &n, std::vector *axes) { @@ -336,4 +336,4 @@ const BaseRef GroupNormFusion::DefinePattern() const { return add2_ref; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/groupnorm_fusion.h b/mindspore-lite/tools/optimizer/fusion/groupnorm_fusion.h index 0b3db8bb..8665618f 100644 --- a/mindspore-lite/tools/optimizer/fusion/groupnorm_fusion.h +++ 
b/mindspore-lite/tools/optimizer/fusion/groupnorm_fusion.h @@ -25,7 +25,7 @@ #include "include/common/utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { /// fuse layer_norm or instance_norm into one operator class GroupNormFusion : public LitePatternProcessPass { @@ -58,6 +58,6 @@ class GroupNormFusion : public LitePatternProcessPass { mutable VarPtr real_div_divider_ = nullptr; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_GROUPNORM_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/groupnormsilu_fusion.cc b/mindspore-lite/tools/optimizer/fusion/groupnormsilu_fusion.cc index 1e7fe1cc..866fab90 100644 --- a/mindspore-lite/tools/optimizer/fusion/groupnormsilu_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/groupnormsilu_fusion.cc @@ -30,7 +30,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr auto kNameGroupNormSiluPatternForSD15 = "GroupNormSiluPatternForSD15"; constexpr auto kNameGroupNormSiluPatternForSDWithCast = "GroupNormSiluPatternForSDWithCast"; @@ -671,4 +671,4 @@ AnfNodePtr GroupNormSiluFusion::Process(const std::string &patten_name, const Fu MS_LOG(INFO) << "GroupNormSilu node fusion success, fusion node name: " << groupnormsilu_node->fullname_with_scope(); return groupnormsilu_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/groupnormsilu_fusion.h b/mindspore-lite/tools/optimizer/fusion/groupnormsilu_fusion.h index 0eb23c72..e6065cd2 100644 --- a/mindspore-lite/tools/optimizer/fusion/groupnormsilu_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/groupnormsilu_fusion.h @@ -23,7 +23,7 @@ #include #include "tools/optimizer/common/multiple_pattern_process_pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class GroupNormSiluFusion : public MultiplePatternProcessPass { public: @@ -100,5 +100,5 @@ class GroupNormSiluFusion : public MultiplePatternProcessPass { const VectorRef DefineGroupNormSiluPatternForGroupNorm() const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_GROUPNORMSILU_BASE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/hard_swish_fusion.cc b/mindspore-lite/tools/optimizer/fusion/hard_swish_fusion.cc index 6a7e7088..c997225a 100644 --- a/mindspore-lite/tools/optimizer/fusion/hard_swish_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/hard_swish_fusion.cc @@ -23,7 +23,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_d.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr float kHSwishAddConst = 3.0; @@ -150,4 +150,4 @@ bool HardSwishFusion::CheckPattern(const EquivPtr &equiv) const { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/hard_swish_fusion.h b/mindspore-lite/tools/optimizer/fusion/hard_swish_fusion.h index d1d2d8e3..7789d66e 100644 --- a/mindspore-lite/tools/optimizer/fusion/hard_swish_fusion.h +++ 
b/mindspore-lite/tools/optimizer/fusion/hard_swish_fusion.h @@ -23,7 +23,7 @@ #include "include/common/utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { /// fuse hard swish into one operator class HardSwishFusion : public LitePatternProcessPass { @@ -45,6 +45,6 @@ class HardSwishFusion : public LitePatternProcessPass { mutable VarPtr div_const_ = nullptr; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_HARD_SWISH_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_assign_fusion.cc b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_assign_fusion.cc index 5a263e75..972f468e 100644 --- a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_assign_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_assign_fusion.cc @@ -29,7 +29,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_k.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { int KVCacheMgrAssignFusion::RemoveAssignOp(const AnfNodePtr &anf_node, const FuncGraphManagerPtr &manager, const CNodePtr &kv_cache_cnode) { @@ -96,4 +96,4 @@ bool KVCacheMgrAssignFusion::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_assign_fusion.h b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_assign_fusion.h index 16812e99..31d7b686 100644 --- a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_assign_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_assign_fusion.h @@ -22,7 +22,7 @@ #include #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class KVCacheMgrAssignFusion : public Pass { public: @@ -37,5 +37,5 @@ class KVCacheMgrAssignFusion : public Pass { std::set remove_cnode_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_KV_CACHE_MGR_ASSIGN_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_concat_fusion.cc b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_concat_fusion.cc index 667eb011..9cbab172 100644 --- a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_concat_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_concat_fusion.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_k.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { AnfNodePtr KVCacheMgrConcatFusion::GetBatchValidLength(CNodePtr concat_cnode) { auto make_tuple_node = concat_cnode->input(kInputIndexOne); @@ -86,4 +86,4 @@ bool KVCacheMgrConcatFusion::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_concat_fusion.h b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_concat_fusion.h index 86f976f4..feef5324 100644 --- a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_concat_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_concat_fusion.h @@ -21,7 +21,7 @@ #include #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore { +namespace mindspore::lite { 
namespace opt { class KVCacheMgrConcatFusion : public Pass { public: @@ -33,5 +33,5 @@ class KVCacheMgrConcatFusion : public Pass { AnfNodePtr GetBatchValidLength(CNodePtr concat_cnode); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_KV_CACHE_MGR_CONCAT_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_load_fusion.cc b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_load_fusion.cc index 045a3df4..6d487fbc 100644 --- a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_load_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_load_fusion.cc @@ -29,7 +29,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_l.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { int KVCacheMgrLoadFusion::RemoveLoadOp(const AnfNodePtr &anf_node, const FuncGraphManagerPtr &manager, const CNodePtr &kv_cache_cnode) { @@ -94,4 +94,4 @@ bool KVCacheMgrLoadFusion::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_load_fusion.h b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_load_fusion.h index 3c5909ee..901d1ddb 100644 --- a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_load_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_load_fusion.h @@ -22,7 +22,7 @@ #include #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class KVCacheMgrLoadFusion : public Pass { public: @@ -37,5 +37,5 @@ class KVCacheMgrLoadFusion : public Pass { std::set remove_cnode_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_KV_CACHE_MGR_LOAD_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_one_branch_fusion.cc b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_one_branch_fusion.cc index a3210737..0e6be98b 100644 --- a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_one_branch_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_one_branch_fusion.cc @@ -39,7 +39,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" #include "ir/tensor_new.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef KVCacheMgrOneBranchFusion::DefinePattern() const { if (!InitVar()) { MS_LOG(ERROR) << "initial member failed."; @@ -180,4 +180,4 @@ const AnfNodePtr KVCacheMgrOneBranchFusion::Process(const FuncGraphPtr &func_gra return kv_cache_cnode; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_one_branch_fusion.h b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_one_branch_fusion.h index 0520f0bd..7c57a84b 100644 --- a/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_one_branch_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/kv_cache_mgr_one_branch_fusion.h @@ -20,7 +20,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "schema/inner/model_generated.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class KVCacheMgrOneBranchFusion : public LitePatternProcessPass { public: @@ -41,6 +41,6 @@ class KVCacheMgrOneBranchFusion : public LitePatternProcessPass { mutable VarPtr input_2_key_past_ = nullptr; }; } // namespace 
opt -} // namespace mindspore +} // namespace mindspore::lite #endif diff --git a/mindspore-lite/tools/optimizer/fusion/leaky_relu_fusion.cc b/mindspore-lite/tools/optimizer/fusion/leaky_relu_fusion.cc index fedb5bf1..e62c6934 100644 --- a/mindspore-lite/tools/optimizer/fusion/leaky_relu_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/leaky_relu_fusion.cc @@ -27,7 +27,7 @@ #include "infer/leaky_relu.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kNumIndex1 = 1; constexpr size_t kNumIndex2 = 2; @@ -140,4 +140,4 @@ const AnfNodePtr LeakyReluFusion::Process(const FuncGraphPtr &func_graph, const return activate_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/leaky_relu_fusion.h b/mindspore-lite/tools/optimizer/fusion/leaky_relu_fusion.h index 8dfc70f8..1b1b53fe 100644 --- a/mindspore-lite/tools/optimizer/fusion/leaky_relu_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/leaky_relu_fusion.h @@ -19,7 +19,7 @@ #include #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class LeakyReluFusion : public LitePatternProcessPass { public: explicit LeakyReluFusion(bool multigraph = true, const std::string &name = "LeakyReluFusion") @@ -30,6 +30,6 @@ class LeakyReluFusion : public LitePatternProcessPass { const BaseRef DefinePattern() const override; const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif diff --git a/mindspore-lite/tools/optimizer/fusion/matmul_activation_fusion.cc b/mindspore-lite/tools/optimizer/fusion/matmul_activation_fusion.cc index 918b634e..3dfbbfd2 100644 --- a/mindspore-lite/tools/optimizer/fusion/matmul_activation_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/matmul_activation_fusion.cc @@ -27,7 +27,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef MatMulActivationFusion::DefinePattern() const { auto is_matmul = std::make_shared(IsSpecifiedNode<&prim::kPrimMatMulFusion>); MS_CHECK_TRUE_RET(is_matmul != nullptr, {}); @@ -91,4 +91,4 @@ const AnfNodePtr MatMulActivationFusion::Process(const FuncGraphPtr &func_graph, manage->Replace(act_cnode, matmul_cnode); return matmul_cnode; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/matmul_activation_fusion.h b/mindspore-lite/tools/optimizer/fusion/matmul_activation_fusion.h index d14b170b..f31952a9 100644 --- a/mindspore-lite/tools/optimizer/fusion/matmul_activation_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/matmul_activation_fusion.h @@ -23,7 +23,7 @@ #include "tools/converter/converter_context.h" #include "tools/converter/cxx_api/converter_para.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class MatMulActivationFusion : public LitePatternProcessPass { public: @@ -37,5 +37,5 @@ class MatMulActivationFusion : public LitePatternProcessPass { const std::shared_ptr param_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_MATMUL_ACTIVATION_FUSION_H_ diff --git 
a/mindspore-lite/tools/optimizer/fusion/matmul_add_fusion.cc b/mindspore-lite/tools/optimizer/fusion/matmul_add_fusion.cc index bd404bec..f8ffe928 100644 --- a/mindspore-lite/tools/optimizer/fusion/matmul_add_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/matmul_add_fusion.cc @@ -31,7 +31,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { bool IsPrimitiveProper(const CNodePtr &add_cnode, const CNodePtr &matmul_cnode, int index) { @@ -183,4 +183,4 @@ bool MatMulAddFusion::Run(const FuncGraphPtr &func_graph) { return false; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/matmul_add_fusion.h b/mindspore-lite/tools/optimizer/fusion/matmul_add_fusion.h index ce875bd0..d868977a 100644 --- a/mindspore-lite/tools/optimizer/fusion/matmul_add_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/matmul_add_fusion.h @@ -21,7 +21,7 @@ #include #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class MatMulAddFusion : public Pass { public: @@ -30,5 +30,5 @@ class MatMulAddFusion : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_MATMUL_ADD_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/matmul_allreduce_fusion.cc b/mindspore-lite/tools/optimizer/fusion/matmul_allreduce_fusion.cc index 44bd6882..0ab01fef 100644 --- a/mindspore-lite/tools/optimizer/fusion/matmul_allreduce_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/matmul_allreduce_fusion.cc @@ -35,7 +35,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_q.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { std::unordered_map MatMulAllReduceFusion::DefinePatterns() const { std::unordered_map patterns; patterns[kPatternNameMatMulAllReduce] = DefineMatMulAllReducePattern(); @@ -410,4 +410,4 @@ AnfNodePtr MatMulAllReduceFusion::Process(const std::string &pattern_name, const MS_LOG(DEBUG) << "MatMulAllReduce replace success"; return matmul_allreduce_cnode; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/matmul_allreduce_fusion.h b/mindspore-lite/tools/optimizer/fusion/matmul_allreduce_fusion.h index 9394d35f..10bbb408 100644 --- a/mindspore-lite/tools/optimizer/fusion/matmul_allreduce_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/matmul_allreduce_fusion.h @@ -22,7 +22,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class MatMulAllReduceFusion : public MultiplePatternProcessPass { public: @@ -57,5 +57,5 @@ class MatMulAllReduceFusion : public MultiplePatternProcessPass { const std::string kAttrNameTransposeB = "transpose_b"; const std::string kAttrNameNeedFusedXoffsetToBias = "need_fused_x_offset_to_bias"; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_MATMUL_ALLREDUCE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/matmul_mul_fusion.cc 
b/mindspore-lite/tools/optimizer/fusion/matmul_mul_fusion.cc index 78cc55c3..08633b05 100644 --- a/mindspore-lite/tools/optimizer/fusion/matmul_mul_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/matmul_mul_fusion.cc @@ -26,7 +26,7 @@ #include "ops_utils/op_utils.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr int64_t KMatmulWeightDims = 2; constexpr size_t kMatMulNonBatchDims = 2; @@ -245,4 +245,4 @@ const AnfNodePtr MatMulMulFusion::Process(const FuncGraphPtr &func_graph, const (void)manager->Replace(mul_cnode, mul_cnode->input(1)); return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/matmul_mul_fusion.h b/mindspore-lite/tools/optimizer/fusion/matmul_mul_fusion.h index 3b43b8f4..afa61879 100644 --- a/mindspore-lite/tools/optimizer/fusion/matmul_mul_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/matmul_mul_fusion.h @@ -20,7 +20,7 @@ #include #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class MatMulMulFusion : public LitePatternProcessPass { public: explicit MatMulMulFusion(bool multigraph = true, const std::string &name = "MatMulMulFusion") @@ -29,5 +29,5 @@ class MatMulMulFusion : public LitePatternProcessPass { const BaseRef DefinePattern() const override; const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_MATMUL_MUL_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/matmul_scale_fusion.cc b/mindspore-lite/tools/optimizer/fusion/matmul_scale_fusion.cc index 1f5ea603..7d8c5d43 100644 --- a/mindspore-lite/tools/optimizer/fusion/matmul_scale_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/matmul_scale_fusion.cc @@ -27,7 +27,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kMatMulNonBatchDims = 2; } // namespace @@ -121,4 +121,4 @@ const BaseRef MatMulScaleFusion::DefinePattern() const { MS_CHECK_TRUE_RET(is_seq_var != nullptr, {}); return VectorRef({is_scale, is_fc, is_param, is_seq_var}); } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/matmul_scale_fusion.h b/mindspore-lite/tools/optimizer/fusion/matmul_scale_fusion.h index 879555f8..6b1b780e 100644 --- a/mindspore-lite/tools/optimizer/fusion/matmul_scale_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/matmul_scale_fusion.h @@ -24,7 +24,7 @@ #include "tools/optimizer/fusion/scale_base_fusion.h" #include "mindspore/ops/op_def/auto_generate/gen_lite_ops.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class MatMulScaleFusion : public ScaleBaseFusion { public: explicit MatMulScaleFusion(bool multigraph = true) : ScaleBaseFusion("MatMulScaleFusion", multigraph) {} @@ -39,5 +39,5 @@ class MatMulScaleFusion : public ScaleBaseFusion { int CalNewScaleImpl(float *curr_weight_data, std::vector prev_weight_shape, float *prev_weight_data, const AnfNodePtr &prim) const override; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_MATMUL_SCALE_FUSION_H_ 
diff --git a/mindspore-lite/tools/optimizer/fusion/mul_activation_fusion.cc b/mindspore-lite/tools/optimizer/fusion/mul_activation_fusion.cc index b10a2587..915317fb 100644 --- a/mindspore-lite/tools/optimizer/fusion/mul_activation_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/mul_activation_fusion.cc @@ -25,7 +25,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef MulActivationFusion::DefinePattern() const { auto is_act = std::make_shared(IsSpecifiedNode<&prim::kPrimActivation>); MS_CHECK_TRUE_RET(is_act != nullptr, {}); @@ -85,4 +85,4 @@ const AnfNodePtr MulActivationFusion::Process(const FuncGraphPtr &func_graph, co (void)manager->Replace(act_cnode, mul_node); return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/mul_activation_fusion.h b/mindspore-lite/tools/optimizer/fusion/mul_activation_fusion.h index 487e30b3..c6d5dacf 100644 --- a/mindspore-lite/tools/optimizer/fusion/mul_activation_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/mul_activation_fusion.h @@ -20,7 +20,7 @@ #include #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class MulActivationFusion : public LitePatternProcessPass { public: explicit MulActivationFusion(bool multigraph = true, const std::string &name = "MulActivationFusion") @@ -31,5 +31,5 @@ class MulActivationFusion : public LitePatternProcessPass { const BaseRef DefinePattern() const override; const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_MUL_ACTIVATION_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/mul_add_fusion.cc b/mindspore-lite/tools/optimizer/fusion/mul_add_fusion.cc index 68d334b4..a332145e 100644 --- a/mindspore-lite/tools/optimizer/fusion/mul_add_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/mul_add_fusion.cc @@ -30,7 +30,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { VectorRef MulAddFusion::DefineMulFirstPattern() const { auto is_mul = std::make_shared(IsSpecifiedNode<&prim::kPrimMulFusion>); MS_CHECK_TRUE_RET(is_mul != nullptr, {}); @@ -303,4 +303,4 @@ AnfNodePtr MulAddFusion::Process(const std::string &pattern_name, const mindspor scale_node->set_abstract(add_cnode->abstract()); return scale_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/mul_add_fusion.h b/mindspore-lite/tools/optimizer/fusion/mul_add_fusion.h index 21e1a2fc..9d848f90 100644 --- a/mindspore-lite/tools/optimizer/fusion/mul_add_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/mul_add_fusion.h @@ -26,7 +26,7 @@ #include "infer/cxx_api/scale_fusion.h" #include "utils/check_convert_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class MulAddFusion : public MultiplePatternProcessPass { public: @@ -59,5 +59,5 @@ class MulAddFusion : public MultiplePatternProcessPass { mutable ActivationType scale_act_type_ = ActivationType::NO_ACTIVATION; }; } // namespace opt -} // namespace mindspore +} // 
namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_PASS_FUSION_CONV_ACTIVATION_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/mul_reduce_fusion.cc b/mindspore-lite/tools/optimizer/fusion/mul_reduce_fusion.cc index bc1025b2..0e9247db 100644 --- a/mindspore-lite/tools/optimizer/fusion/mul_reduce_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/mul_reduce_fusion.cc @@ -35,7 +35,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr int kReciprocalFirstIndex = -1; @@ -493,4 +493,4 @@ bool MulReduceFusion::CheckConcatOp(const FuncGraphPtr &func_graph, const CNodeP return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/mul_reduce_fusion.h b/mindspore-lite/tools/optimizer/fusion/mul_reduce_fusion.h index a2842fc1..5a8c5f39 100644 --- a/mindspore-lite/tools/optimizer/fusion/mul_reduce_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/mul_reduce_fusion.h @@ -23,7 +23,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/graph/preprocess_dynamic_shape.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class MulReduceFusion : public Pass { public: @@ -58,5 +58,5 @@ class MulReduceFusion : public Pass { squeeze_infos_; // record generated-squeeze(>) which is used to post-fusion. }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_MUL_REDUCE_FUSION_H diff --git a/mindspore-lite/tools/optimizer/fusion/multi_head_attention_fusion.cc b/mindspore-lite/tools/optimizer/fusion/multi_head_attention_fusion.cc index cebc52ee..a4443491 100644 --- a/mindspore-lite/tools/optimizer/fusion/multi_head_attention_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/multi_head_attention_fusion.cc @@ -44,7 +44,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "ir/tensor_new.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { const auto &p1 = std::placeholders::_1; const size_t kWeightShapeSize = 2; @@ -1102,4 +1102,4 @@ CNodePtr MultiHeadAttentionFusion::CreateMaskedMultiHeadAttentionNode(const Func (void)RemoveRedundantInput(func_graph, redundant); return ret_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/multi_head_attention_fusion.h b/mindspore-lite/tools/optimizer/fusion/multi_head_attention_fusion.h index e467d917..2af366d8 100644 --- a/mindspore-lite/tools/optimizer/fusion/multi_head_attention_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/multi_head_attention_fusion.h @@ -26,7 +26,7 @@ #include "include/errorcode.h" #include "infer/attention.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class MultiHeadAttentionFusion : public MultiplePatternProcessPass { public: @@ -130,5 +130,5 @@ class MultiHeadAttentionFusion : public MultiplePatternProcessPass { mutable float scale_{true}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_MULTI_HEAD_ATTENTION_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/norm_fusion.cc b/mindspore-lite/tools/optimizer/fusion/norm_fusion.cc index b4ab07f0..ab0e48a4 100644 --- a/mindspore-lite/tools/optimizer/fusion/norm_fusion.cc +++ 
b/mindspore-lite/tools/optimizer/fusion/norm_fusion.cc @@ -38,7 +38,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { STATUS GetReduceAxes(const BaseRef &n, std::vector *axes) { @@ -648,4 +648,4 @@ const BaseRef OnnxLayerNormFusion2::DefinePattern() const { return add2_ref; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/norm_fusion.h b/mindspore-lite/tools/optimizer/fusion/norm_fusion.h index 5f0cdd58..aa644c3e 100644 --- a/mindspore-lite/tools/optimizer/fusion/norm_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/norm_fusion.h @@ -26,7 +26,7 @@ #include "include/common/utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { /// fuse layer_norm or instance_norm into one operator @@ -106,6 +106,6 @@ class OnnxLayerNormFusion2 : public NormFusion { const BaseRef DefinePattern() const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_NORM_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/onnx_gelu_fusion.cc b/mindspore-lite/tools/optimizer/fusion/onnx_gelu_fusion.cc index 9a78a2f2..b842cfaf 100644 --- a/mindspore-lite/tools/optimizer/fusion/onnx_gelu_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/onnx_gelu_fusion.cc @@ -25,7 +25,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_e.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr int kStructureNum = 2; @@ -127,4 +127,4 @@ bool OnnxGeLUFusion::CheckPattern(const std::string &pattern_name, const EquivPt return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/onnx_gelu_fusion.h b/mindspore-lite/tools/optimizer/fusion/onnx_gelu_fusion.h index 69b815c7..394aa295 100644 --- a/mindspore-lite/tools/optimizer/fusion/onnx_gelu_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/onnx_gelu_fusion.h @@ -23,7 +23,7 @@ #include #include "tools/optimizer/fusion/gelu_fusion.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class OnnxGeLUFusion : public GeLUFusion { public: @@ -47,6 +47,6 @@ class OnnxGeLUFusion : public GeLUFusion { mutable std::vector mul1_y_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_ONNX_GELU_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/prelu_fusion.cc b/mindspore-lite/tools/optimizer/fusion/prelu_fusion.cc index 594023da..315a00b3 100644 --- a/mindspore-lite/tools/optimizer/fusion/prelu_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/prelu_fusion.cc @@ -21,7 +21,7 @@ #include "infer/cxx_api/prelu_fusion.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool PReluFusion::Init() const { input_ = std::make_shared(); @@ -126,4 +126,4 @@ bool PReluFusion::CheckPattern(const EquivPtr &equiv, std::vector *slope) return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/prelu_fusion.h 
b/mindspore-lite/tools/optimizer/fusion/prelu_fusion.h index 9b19f671..2be3202f 100644 --- a/mindspore-lite/tools/optimizer/fusion/prelu_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/prelu_fusion.h @@ -25,7 +25,7 @@ #include "include/common/utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class PReluFusion : public LitePatternProcessPass { public: @@ -45,6 +45,6 @@ class PReluFusion : public LitePatternProcessPass { mutable VarPtr mul_const_ = nullptr; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_PRELU_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/quant_dtype_cast_fusion.cc b/mindspore-lite/tools/optimizer/fusion/quant_dtype_cast_fusion.cc index f8ce59df..894c5513 100644 --- a/mindspore-lite/tools/optimizer/fusion/quant_dtype_cast_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/quant_dtype_cast_fusion.cc @@ -19,7 +19,7 @@ #include "nnacl_c/op_base.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_q.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { const BaseRef QuantDtypeCastFusion::DefinePattern() const { input_ = std::make_shared(); @@ -94,4 +94,4 @@ bool QuantDtypeCastFusion::CheckPattern(const EquivPtr &equiv, const AnfNodePtr return check_dtype_matched; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/quant_dtype_cast_fusion.h b/mindspore-lite/tools/optimizer/fusion/quant_dtype_cast_fusion.h index 146c4267..6dd57546 100644 --- a/mindspore-lite/tools/optimizer/fusion/quant_dtype_cast_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/quant_dtype_cast_fusion.h @@ -25,7 +25,7 @@ #include "include/common/utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class QuantDtypeCastFusion : public LitePatternProcessPass { public: @@ -43,5 +43,5 @@ class QuantDtypeCastFusion : public LitePatternProcessPass { mutable VarPtr input_ = nullptr; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_QUANT_DTYPE_CAST_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/reduce_same_op_in_horizon.cc b/mindspore-lite/tools/optimizer/fusion/reduce_same_op_in_horizon.cc index ac6d30f7..0d0e1f6f 100644 --- a/mindspore-lite/tools/optimizer/fusion/reduce_same_op_in_horizon.cc +++ b/mindspore-lite/tools/optimizer/fusion/reduce_same_op_in_horizon.cc @@ -26,7 +26,7 @@ #include "include/errorcode.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { bool CheckValueIsEqual(const ValuePtr &left, const ValuePtr &right) { @@ -174,4 +174,4 @@ bool ReduceSameOpInHorizon::Run(const FuncGraphPtr &func_graph) { return status == lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/reduce_same_op_in_horizon.h b/mindspore-lite/tools/optimizer/fusion/reduce_same_op_in_horizon.h index cce01387..ef508548 100644 --- a/mindspore-lite/tools/optimizer/fusion/reduce_same_op_in_horizon.h +++ b/mindspore-lite/tools/optimizer/fusion/reduce_same_op_in_horizon.h @@ -22,7 +22,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/converter/cxx_api/converter_para.h" -namespace mindspore { +namespace 
mindspore::lite { namespace opt { using ConverterParaPtr = std::shared_ptr; class ReduceSameOpInHorizon : public Pass { @@ -37,6 +37,6 @@ class ReduceSameOpInHorizon : public Pass { ConverterParaPtr param_{nullptr}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_REDUCE_SAME_OP_IN_HORIZON_H diff --git a/mindspore-lite/tools/optimizer/fusion/reduce_stack_fusion.cc b/mindspore-lite/tools/optimizer/fusion/reduce_stack_fusion.cc index 543e865b..c34a2fd0 100644 --- a/mindspore-lite/tools/optimizer/fusion/reduce_stack_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/reduce_stack_fusion.cc @@ -27,7 +27,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool ReduceStackFusion::Run(const FuncGraphPtr &func_graph) { if (func_graph == nullptr) { @@ -125,4 +125,4 @@ bool ReduceStackFusion::CheckReduce(const FuncGraphPtr &func_graph, const CNodeP return *(static_cast(data_info.data_ptr_)) == stack_axis; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/reduce_stack_fusion.h b/mindspore-lite/tools/optimizer/fusion/reduce_stack_fusion.h index e94ab653..fa028142 100644 --- a/mindspore-lite/tools/optimizer/fusion/reduce_stack_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/reduce_stack_fusion.h @@ -20,7 +20,7 @@ #include "ir/anf.h" #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ReduceStackFusion : public Pass { public: @@ -35,5 +35,5 @@ class ReduceStackFusion : public Pass { PrimitivePtr reduce_prim_{nullptr}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_REDUCE_STACK_FUSION_H diff --git a/mindspore-lite/tools/optimizer/fusion/remove_transitivity_op.cc b/mindspore-lite/tools/optimizer/fusion/remove_transitivity_op.cc index d46b1dbf..e6526e53 100644 --- a/mindspore-lite/tools/optimizer/fusion/remove_transitivity_op.cc +++ b/mindspore-lite/tools/optimizer/fusion/remove_transitivity_op.cc @@ -30,7 +30,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool RemoveTransitivityOp::Run(const FuncGraphPtr &func_graph) { if (func_graph == nullptr) { @@ -159,4 +159,4 @@ int RemoveTransitivityOp::DoReplace(const FuncGraphPtr &func_graph, const CNodeP return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/remove_transitivity_op.h b/mindspore-lite/tools/optimizer/fusion/remove_transitivity_op.h index 344eb413..b59a088b 100644 --- a/mindspore-lite/tools/optimizer/fusion/remove_transitivity_op.h +++ b/mindspore-lite/tools/optimizer/fusion/remove_transitivity_op.h @@ -20,7 +20,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/graph/preprocess_dynamic_shape.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { // remove the op whose output is equal to its input. 
class RemoveTransitivityOp : public Pass { @@ -37,5 +37,5 @@ class RemoveTransitivityOp : public Pass { DynamicShapePreprocessor preprocessor_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_REMOVE_TRANSITIVITY_OP_H diff --git a/mindspore-lite/tools/optimizer/fusion/reshape_like_operator_ablation.cc b/mindspore-lite/tools/optimizer/fusion/reshape_like_operator_ablation.cc index 1d43dc26..c1140e96 100644 --- a/mindspore-lite/tools/optimizer/fusion/reshape_like_operator_ablation.cc +++ b/mindspore-lite/tools/optimizer/fusion/reshape_like_operator_ablation.cc @@ -25,7 +25,7 @@ #include "mindspore/core/include/ir/graph_utils.h" #include "mindspore/core/include/ir/primitive.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { const std::set kReshapeLikeOp = {"Reshape", "Squeeze", "Unsqueeze", "ExpandDims"}; @@ -118,4 +118,4 @@ int AblateReshapeLikeOp::DoAblation(const FuncGraphPtr &func_graph, const CNodeP return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/reshape_like_operator_ablation.h b/mindspore-lite/tools/optimizer/fusion/reshape_like_operator_ablation.h index 6f3ca857..395724e6 100644 --- a/mindspore-lite/tools/optimizer/fusion/reshape_like_operator_ablation.h +++ b/mindspore-lite/tools/optimizer/fusion/reshape_like_operator_ablation.h @@ -21,7 +21,7 @@ #include "tools/optimizer/graph/preprocess_dynamic_shape.h" #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { /* * If a link whose output-data is equal to its input-data, then the link may be ablated in some cases. @@ -44,5 +44,5 @@ class AblateReshapeLikeOp : public Pass { DynamicShapePreprocessor preprocessor_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_RESHAPE_LIKE_OPERATOR_ABLATION_H diff --git a/mindspore-lite/tools/optimizer/fusion/reshape_reduce_fusion.cc b/mindspore-lite/tools/optimizer/fusion/reshape_reduce_fusion.cc index 4a80bea3..7b2591cc 100644 --- a/mindspore-lite/tools/optimizer/fusion/reshape_reduce_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/reshape_reduce_fusion.cc @@ -27,7 +27,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool CheckIsClosedCycle(const FuncGraphPtr &func_graph, const CNodePtr &in_node, const CNodePtr &out_node) { std::set ops{in_node}; @@ -276,4 +276,4 @@ void ReshapeReduceFusion::FuseReduceWithReshape(const FuncGraphPtr &func_graph, (void)manager->Replace(reshape_, reduce); } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/reshape_reduce_fusion.h b/mindspore-lite/tools/optimizer/fusion/reshape_reduce_fusion.h index 58f351c3..980da561 100644 --- a/mindspore-lite/tools/optimizer/fusion/reshape_reduce_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/reshape_reduce_fusion.h @@ -23,7 +23,7 @@ #include "utils/check_convert_utils.h" #include "tools/optimizer/graph/preprocess_dynamic_shape.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ReshapeReduceFusion : public Pass { public: @@ -45,5 +45,5 @@ class ReshapeReduceFusion : public Pass { DynamicShapePreprocessor preprocessor_; }; } // namespace 
opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_RESHAPE_REDUCE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/reshape_reshape_fusion.cc b/mindspore-lite/tools/optimizer/fusion/reshape_reshape_fusion.cc index 7cf375e6..7d8ac4c4 100644 --- a/mindspore-lite/tools/optimizer/fusion/reshape_reshape_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/reshape_reshape_fusion.cc @@ -30,7 +30,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_u.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { const auto &p1 = std::placeholders::_1; const auto kPreReshapePattern = "PreReshapePatternName"; @@ -211,4 +211,4 @@ AnfNodePtr ReshapeReshapeFusion::Process(const std::string &pattern_name, const new_reshape->set_fullname_with_scope(cnode->fullname_with_scope()); return new_reshape; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/reshape_reshape_fusion.h b/mindspore-lite/tools/optimizer/fusion/reshape_reshape_fusion.h index 1e322c1d..997de12d 100644 --- a/mindspore-lite/tools/optimizer/fusion/reshape_reshape_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/reshape_reshape_fusion.h @@ -23,7 +23,7 @@ #include "tools/optimizer/common/multiple_pattern_process_pass.h" #include "utils/check_convert_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ReshapeReshapeFusion : public MultiplePatternProcessPass { public: @@ -40,5 +40,5 @@ class ReshapeReshapeFusion : public MultiplePatternProcessPass { VectorRef DefineReshapeReshapePattern() const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_RESHAPE_RESHAPE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/reshape_shape_fusion.cc b/mindspore-lite/tools/optimizer/fusion/reshape_shape_fusion.cc index d98401cc..7bb0cede 100644 --- a/mindspore-lite/tools/optimizer/fusion/reshape_shape_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/reshape_shape_fusion.cc @@ -24,7 +24,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool ReshapeShapeFusion::Run(const FuncGraphPtr &func_graph) { if (func_graph == nullptr) { @@ -89,4 +89,4 @@ int ReshapeShapeFusion::Process(const FuncGraphPtr &func_graph, const CNodePtr & return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/reshape_shape_fusion.h b/mindspore-lite/tools/optimizer/fusion/reshape_shape_fusion.h index cf0dafcb..9a29ca18 100644 --- a/mindspore-lite/tools/optimizer/fusion/reshape_shape_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/reshape_shape_fusion.h @@ -19,7 +19,7 @@ #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { /* * The subgraph such as the following. 
@@ -39,6 +39,6 @@ class ReshapeShapeFusion : public Pass { int Process(const FuncGraphPtr &func_graph, const CNodePtr &reshape); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_RESHAPE_SHAPE_FUSION_H diff --git a/mindspore-lite/tools/optimizer/fusion/reshape_transpose_fusion.cc b/mindspore-lite/tools/optimizer/fusion/reshape_transpose_fusion.cc index 87e4c45d..76f1f526 100644 --- a/mindspore-lite/tools/optimizer/fusion/reshape_transpose_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/reshape_transpose_fusion.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { const auto &p1 = std::placeholders::_1; } // namespace @@ -453,4 +453,4 @@ AnfNodePtr ReshapeTransposeFusion::Process(const std::string &pattern_name, cons return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/reshape_transpose_fusion.h b/mindspore-lite/tools/optimizer/fusion/reshape_transpose_fusion.h index aa8f82bc..6f004ae7 100644 --- a/mindspore-lite/tools/optimizer/fusion/reshape_transpose_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/reshape_transpose_fusion.h @@ -24,7 +24,7 @@ #include "tools/optimizer/common/multiple_pattern_process_pass.h" #include "utils/check_convert_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ReshapeTransposeFusion : public MultiplePatternProcessPass { public: @@ -44,5 +44,5 @@ class ReshapeTransposeFusion : public MultiplePatternProcessPass { const EquivPtr &) const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_RESHAPE_TRANSPOSE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/resize_fusion.cc b/mindspore-lite/tools/optimizer/fusion/resize_fusion.cc index 1755f1e7..cfd77f06 100644 --- a/mindspore-lite/tools/optimizer/fusion/resize_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/resize_fusion.cc @@ -37,7 +37,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef ResizeFusion1::DefinePattern() const { input_ = std::make_shared(); MS_CHECK_TRUE_RET(input_ != nullptr, false); @@ -267,4 +267,4 @@ const AnfNodePtr ResizeFusion::Process(const FuncGraphPtr &func_graph, const Anf } return node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/resize_fusion.h b/mindspore-lite/tools/optimizer/fusion/resize_fusion.h index cfd75b02..e7b9afdb 100644 --- a/mindspore-lite/tools/optimizer/fusion/resize_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/resize_fusion.h @@ -20,7 +20,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "tools/converter/converter_context.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ResizeFusion : public LitePatternProcessPass { public: @@ -56,5 +56,5 @@ class ResizeFusion2 : public ResizeFusion { mutable VarPtr input_ = nullptr; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_RESIZE_FUSION_H_ diff --git 
a/mindspore-lite/tools/optimizer/fusion/scale_activation_fusion.cc b/mindspore-lite/tools/optimizer/fusion/scale_activation_fusion.cc index 543cdfc5..e5840853 100644 --- a/mindspore-lite/tools/optimizer/fusion/scale_activation_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/scale_activation_fusion.cc @@ -26,7 +26,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef ScaleActivationFusion::DefinePattern() const { auto is_scale = std::make_shared(IsSpecifiedNode<&prim::kPrimScaleFusion>); MS_CHECK_TRUE_RET(is_scale != nullptr, {}); @@ -75,4 +75,4 @@ const AnfNodePtr ScaleActivationFusion::Process(const FuncGraphPtr &func_graph, (void)scale_prim_c->AddAttr(ops::kActivationType, MakeValue(static_cast(act_type))); return scale_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/scale_activation_fusion.h b/mindspore-lite/tools/optimizer/fusion/scale_activation_fusion.h index b6305277..815fe65d 100644 --- a/mindspore-lite/tools/optimizer/fusion/scale_activation_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/scale_activation_fusion.h @@ -20,7 +20,7 @@ #include #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ScaleActivationFusion : public LitePatternProcessPass { public: @@ -31,5 +31,5 @@ class ScaleActivationFusion : public LitePatternProcessPass { const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_SCALE_ACTIVATION_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/scale_base_fusion.cc b/mindspore-lite/tools/optimizer/fusion/scale_base_fusion.cc index 214424bf..3eefba02 100644 --- a/mindspore-lite/tools/optimizer/fusion/scale_base_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/scale_base_fusion.cc @@ -25,7 +25,7 @@ #include "ops_utils/op_utils.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { int ScaleBaseFusion::CalNewCnodeScale(const CNodePtr &curr_cnode, const std::vector &fusion_cnode_inputs) const { auto curr_weight_node = curr_cnode->input(kInputIndexTwo); @@ -245,4 +245,4 @@ const AnfNodePtr ScaleBaseFusion::Process(const FuncGraphPtr &func_graph, const MS_LOG(INFO) << curr_cnode->fullname_with_scope() << " fusion success"; return fusion_cnode; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/scale_base_fusion.h b/mindspore-lite/tools/optimizer/fusion/scale_base_fusion.h index c7ecc257..c8c39836 100644 --- a/mindspore-lite/tools/optimizer/fusion/scale_base_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/scale_base_fusion.h @@ -24,7 +24,7 @@ #include "infer/cxx_api/scale_fusion.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class ScaleBaseFusion : public LitePatternProcessPass { public: explicit ScaleBaseFusion(std::string name, bool multigraph = true) : LitePatternProcessPass(name, multigraph) {} @@ -45,5 +45,5 @@ class ScaleBaseFusion : public LitePatternProcessPass { virtual int CalNewScaleImpl(float *curr_weight_data, std::vector 
prev_weight_shape, float *prev_weight_data, const AnfNodePtr &prim) const = 0; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_SRC_PASS_FUSION_SCALE_BASE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/scale_scale_fusion.cc b/mindspore-lite/tools/optimizer/fusion/scale_scale_fusion.cc index ff537a4c..00a7211f 100644 --- a/mindspore-lite/tools/optimizer/fusion/scale_scale_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/scale_scale_fusion.cc @@ -28,7 +28,7 @@ #include "ops_utils/op_utils.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kScaleWeightIndex = 2; constexpr size_t kScaleBiasIndex = 3; @@ -334,4 +334,4 @@ const AnfNodePtr ScaleScaleFusion::Process(const FuncGraphPtr &func_graph, const return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/scale_scale_fusion.h b/mindspore-lite/tools/optimizer/fusion/scale_scale_fusion.h index 6043390c..c191bbc2 100644 --- a/mindspore-lite/tools/optimizer/fusion/scale_scale_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/scale_scale_fusion.h @@ -21,7 +21,7 @@ #include #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class ScaleScaleFusion : public LitePatternProcessPass { public: explicit ScaleScaleFusion(bool multigraph = true, const std::string &name = "ScaleScaleFusion") @@ -48,5 +48,5 @@ class ScaleScaleFusion : public LitePatternProcessPass { mutable size_t up_scale_axis_; mutable size_t down_scale_axis_; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_SCALE_SCALE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/sigmoid_mul_fusion.cc b/mindspore-lite/tools/optimizer/fusion/sigmoid_mul_fusion.cc index 34c15542..3c3ccfe0 100644 --- a/mindspore-lite/tools/optimizer/fusion/sigmoid_mul_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/sigmoid_mul_fusion.cc @@ -26,7 +26,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { VectorRef SigmoidMulFusion::DefineSigmoidMulFirstPattern() const { auto is_activation = std::make_shared(IsSpecifiedNode<&prim::kPrimActivation>); MS_CHECK_TRUE_RET(is_activation != nullptr, {}); @@ -105,4 +105,4 @@ bool SigmoidMulFusion::CheckPattern(const std::string &pattern_name, const FuncG } return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/sigmoid_mul_fusion.h b/mindspore-lite/tools/optimizer/fusion/sigmoid_mul_fusion.h index dee6a110..8eb7249a 100644 --- a/mindspore-lite/tools/optimizer/fusion/sigmoid_mul_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/sigmoid_mul_fusion.h @@ -23,7 +23,7 @@ #include "tools/optimizer/common/multiple_pattern_process_pass.h" #include "tools/converter/converter_context.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class SigmoidMulFusion : public MultiplePatternProcessPass { public: @@ -42,5 +42,5 @@ class SigmoidMulFusion : public MultiplePatternProcessPass { const CNodePtr &mul_cnode) const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // 
MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_SIGMOID_MUL_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/squeeze_expanddims_fusion.cc b/mindspore-lite/tools/optimizer/fusion/squeeze_expanddims_fusion.cc index 74e9632b..c26c7370 100644 --- a/mindspore-lite/tools/optimizer/fusion/squeeze_expanddims_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/squeeze_expanddims_fusion.cc @@ -27,7 +27,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_e.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef SqueezeExpandDimsFusion::DefinePattern() const { auto is_expanddims = std::make_shared(IsSpecifiedNode<&prim::kPrimExpandDims>); MS_CHECK_TRUE_RET(is_expanddims != nullptr, {}); @@ -117,4 +117,4 @@ const AnfNodePtr SqueezeExpandDimsFusion::Process(const FuncGraphPtr &func_graph (void)manage->Replace(expanddims_cnode, squeeze_cnode->input(SECOND_INPUT)); return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/squeeze_expanddims_fusion.h b/mindspore-lite/tools/optimizer/fusion/squeeze_expanddims_fusion.h index 35067080..17ed8987 100644 --- a/mindspore-lite/tools/optimizer/fusion/squeeze_expanddims_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/squeeze_expanddims_fusion.h @@ -22,7 +22,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "utils/check_convert_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class SqueezeExpandDimsFusion : public LitePatternProcessPass { public: @@ -36,5 +36,5 @@ class SqueezeExpandDimsFusion : public LitePatternProcessPass { bool CheckCanFuse(const FuncGraphPtr &func_graph, const AnfNodePtr &node) const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_SQUEEZE_EXPANDDIMS_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/squeeze_fusion.cc b/mindspore-lite/tools/optimizer/fusion/squeeze_fusion.cc index 1eb956e7..b0092570 100644 --- a/mindspore-lite/tools/optimizer/fusion/squeeze_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/squeeze_fusion.cc @@ -31,7 +31,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_u.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { const BaseRef SqueezeFusion::DefinePattern() const { auto is_squeeze = std::make_shared(IsSpecifiedNode<&prim::kPrimSqueeze>); MS_CHECK_TRUE_RET(is_squeeze != nullptr, {}); @@ -113,4 +113,4 @@ const AnfNodePtr SqueezeFusion::Process(const FuncGraphPtr &func_graph, const An } return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/squeeze_fusion.h b/mindspore-lite/tools/optimizer/fusion/squeeze_fusion.h index 5c5e2aba..de910218 100644 --- a/mindspore-lite/tools/optimizer/fusion/squeeze_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/squeeze_fusion.h @@ -21,7 +21,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "schema/inner/model_generated.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class SqueezeFusion : public LitePatternProcessPass { public: @@ -32,5 +32,5 @@ class SqueezeFusion : public LitePatternProcessPass { const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; } // namespace opt 
-} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_SQUEEZE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/strided_slice_checker.cc b/mindspore-lite/tools/optimizer/fusion/strided_slice_checker.cc index 79394bbb..2a844d62 100644 --- a/mindspore-lite/tools/optimizer/fusion/strided_slice_checker.cc +++ b/mindspore-lite/tools/optimizer/fusion/strided_slice_checker.cc @@ -21,7 +21,7 @@ #include "tools/optimizer/common/gllo_utils.h" #include "mindspore/ops/op_def/op_name.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool StridedSliceChecker::CheckCommonInfo(const CNodePtr &strided_slice) { if (strided_slice == nullptr || strided_slice->size() > kInputSizeFive) { @@ -154,4 +154,4 @@ int StridedSliceChecker::GetConstTensor(const CNodePtr &strided_slice, size_t in return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/strided_slice_checker.h b/mindspore-lite/tools/optimizer/fusion/strided_slice_checker.h index c0ea3714..da996cc6 100644 --- a/mindspore-lite/tools/optimizer/fusion/strided_slice_checker.h +++ b/mindspore-lite/tools/optimizer/fusion/strided_slice_checker.h @@ -21,7 +21,7 @@ #include "ir/anf.h" #include "tools/lite_exporter/fetch_content.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class StridedSliceChecker { public: @@ -36,5 +36,5 @@ class StridedSliceChecker { static int GetConstTensor(const CNodePtr &strided_slice, size_t index, lite::DataInfo *data_info); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_STRIDED_SLICE_CHECKER_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/strided_slice_fusion.cc b/mindspore-lite/tools/optimizer/fusion/strided_slice_fusion.cc index 6b22babb..247cf67c 100644 --- a/mindspore-lite/tools/optimizer/fusion/strided_slice_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/strided_slice_fusion.cc @@ -29,7 +29,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { bool CheckContinuity(const std::vector &nodes, int axis) { @@ -198,4 +198,4 @@ bool StridedSliceFusion::CheckCanFusion() { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/strided_slice_fusion.h b/mindspore-lite/tools/optimizer/fusion/strided_slice_fusion.h index 5a2170cb..9b36eca8 100644 --- a/mindspore-lite/tools/optimizer/fusion/strided_slice_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/strided_slice_fusion.h @@ -19,7 +19,7 @@ #include #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class StridedSliceFusion : public Pass { public: @@ -35,6 +35,6 @@ class StridedSliceFusion : public Pass { int axis_{0}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_STRIDED_SLICE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/tensor_dot_fusion.cc b/mindspore-lite/tools/optimizer/fusion/tensor_dot_fusion.cc index fabb55b2..b3d063d3 100644 --- a/mindspore-lite/tools/optimizer/fusion/tensor_dot_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/tensor_dot_fusion.cc @@ -32,7 +32,7 @@ #include 
"mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { STATUS GetIndexValue(const CNodePtr &cnode, std::vector *index, int node_index) { MS_ASSERT(cnode != nullptr); @@ -210,4 +210,4 @@ const AnfNodePtr TensorDotFusion::Process(const FuncGraphPtr &func_graph, const } return nullptr; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/tensor_dot_fusion.h b/mindspore-lite/tools/optimizer/fusion/tensor_dot_fusion.h index b8ac87e8..708a307b 100644 --- a/mindspore-lite/tools/optimizer/fusion/tensor_dot_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/tensor_dot_fusion.h @@ -26,7 +26,7 @@ #include "infer/cxx_api/scale_fusion.h" #include "utils/check_convert_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class TensorDotFusion : public LitePatternProcessPass { public: @@ -36,5 +36,5 @@ class TensorDotFusion : public LitePatternProcessPass { const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TENSOR_DOT_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/tf_bidirection_gru_fusion.cc b/mindspore-lite/tools/optimizer/fusion/tf_bidirection_gru_fusion.cc index f7ed4cc5..f512be03 100644 --- a/mindspore-lite/tools/optimizer/fusion/tf_bidirection_gru_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/tf_bidirection_gru_fusion.cc @@ -43,7 +43,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_w.h" #include "ir/tensor_new.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr int kOffsetTwo = 2; @@ -855,4 +855,4 @@ const AnfNodePtr TfBidirectionGruFusion::Process(const FuncGraphPtr &func_graph, return output_node; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/tf_bidirection_gru_fusion.h b/mindspore-lite/tools/optimizer/fusion/tf_bidirection_gru_fusion.h index f719ebb7..e86b92a7 100644 --- a/mindspore-lite/tools/optimizer/fusion/tf_bidirection_gru_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/tf_bidirection_gru_fusion.h @@ -25,7 +25,7 @@ #include "include/common/utils/utils.h" #include "include/errorcode.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { constexpr size_t kWhileUniqInputsLength = 6; // fuse tf 2.x bidirection_gru into MSLITE GRU @@ -92,6 +92,6 @@ class TfBidirectionGruFusion : public LitePatternProcessPass { }; inline bool IsParameterNode(const BaseRef &n) { return utils::isa(n); } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TF_BIDIRECTION_GRU_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/tf_gelu_fusion.cc b/mindspore-lite/tools/optimizer/fusion/tf_gelu_fusion.cc index 91ed7586..a5e7e8c1 100644 --- a/mindspore-lite/tools/optimizer/fusion/tf_gelu_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/tf_gelu_fusion.cc @@ -24,7 +24,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr float DIFF_THRESHOLD = 0.0001; @@ -134,4 +134,4 @@ 
bool TfGeLUFusion::CheckPattern(const std::string &pattern_name, const EquivPtr return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/tf_gelu_fusion.h b/mindspore-lite/tools/optimizer/fusion/tf_gelu_fusion.h index b597f929..2198258a 100644 --- a/mindspore-lite/tools/optimizer/fusion/tf_gelu_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/tf_gelu_fusion.h @@ -22,7 +22,7 @@ #include #include "tools/optimizer/fusion/gelu_fusion.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class TfGeLUFusion : public GeLUFusion { public: @@ -48,6 +48,6 @@ class TfGeLUFusion : public GeLUFusion { mutable VarPtr mul3_x_ = nullptr; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TF_GELU_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/tf_lstm_cell_fusion.cc b/mindspore-lite/tools/optimizer/fusion/tf_lstm_cell_fusion.cc index 3d12255f..93d2d854 100644 --- a/mindspore-lite/tools/optimizer/fusion/tf_lstm_cell_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/tf_lstm_cell_fusion.cc @@ -32,7 +32,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr int kNumInPlaceHolder = 10; @@ -449,4 +449,4 @@ CNodePtr TfLstmCellFusion::CreateLSTMNode(const FuncGraphPtr &func_graph, const return new_node; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/tf_lstm_cell_fusion.h b/mindspore-lite/tools/optimizer/fusion/tf_lstm_cell_fusion.h index b6716519..d574813f 100644 --- a/mindspore-lite/tools/optimizer/fusion/tf_lstm_cell_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/tf_lstm_cell_fusion.h @@ -24,7 +24,7 @@ #include "include/common/utils/utils.h" #include "include/errorcode.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class TfLstmCellFusion : public TfliteLstmCellFusion { public: @@ -47,6 +47,6 @@ class TfLstmCellFusion : public TfliteLstmCellFusion { mutable VarPtr forget_bias_ = nullptr; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TF_LSTM_CELL_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.cc b/mindspore-lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.cc index 770bf7ec..481cbf91 100644 --- a/mindspore-lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.cc @@ -40,7 +40,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_w.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kWhileInputsLength = 23; @@ -840,4 +840,4 @@ const AnfNodePtr TfliteLstmCellFusion::Process(const FuncGraphPtr &func_graph, c return squeeze_node; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.h b/mindspore-lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.h index 2851f15d..bbfb675b 100644 --- a/mindspore-lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.h @@ -23,7 +23,7 @@ 
#include "include/common/utils/utils.h" #include "include/errorcode.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class TfliteLstmCellFusion : public LitePatternProcessPass { public: @@ -90,5 +90,5 @@ class TfliteLstmCellFusion : public LitePatternProcessPass { size_t body_cnodes_num_ = 0; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TFLITE_LSTM_CELL_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/tflite_rel_pos_multi_head_attention_fusion.cc b/mindspore-lite/tools/optimizer/fusion/tflite_rel_pos_multi_head_attention_fusion.cc index fd9f9098..34454af4 100644 --- a/mindspore-lite/tools/optimizer/fusion/tflite_rel_pos_multi_head_attention_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/tflite_rel_pos_multi_head_attention_fusion.cc @@ -35,7 +35,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { const auto &p1 = std::placeholders::_1; const size_t kWeightQueryIndex = 4; @@ -554,4 +554,4 @@ const VectorRef TfliteRelPosMultiHeadAttentionFusion::DefineProcessOutputPattern result = VectorRef({is_add, result, bias}); return result; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/tflite_rel_pos_multi_head_attention_fusion.h b/mindspore-lite/tools/optimizer/fusion/tflite_rel_pos_multi_head_attention_fusion.h index 0a154619..2c187e8b 100644 --- a/mindspore-lite/tools/optimizer/fusion/tflite_rel_pos_multi_head_attention_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/tflite_rel_pos_multi_head_attention_fusion.h @@ -25,7 +25,7 @@ #include "include/errorcode.h" #include "tools/optimizer/fusion/multi_head_attention_fusion.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class TfliteRelPosMultiHeadAttentionFusion : public MultiHeadAttentionFusion { public: explicit TfliteRelPosMultiHeadAttentionFusion(const std::string &name = "TfliteRelPosMultiHeadAttentionFusion", @@ -72,5 +72,5 @@ class TfliteRelPosMultiHeadAttentionFusion : public MultiHeadAttentionFusion { mutable std::vector pos_stack_params_; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TFLITE_REL_POS_MULTI_HEAD_ATTENTION_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/tile_matmul_fusion.cc b/mindspore-lite/tools/optimizer/fusion/tile_matmul_fusion.cc index 927e1645..903d5b16 100644 --- a/mindspore-lite/tools/optimizer/fusion/tile_matmul_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/tile_matmul_fusion.cc @@ -25,7 +25,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool TileMatMulFusion::CheckCanFuse(const FuncGraphPtr &func_graph, const AnfNodePtr &node) const { auto tile_cnode = node->cast(); @@ -113,4 +113,4 @@ bool TileMatMulFusion::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/tile_matmul_fusion.h b/mindspore-lite/tools/optimizer/fusion/tile_matmul_fusion.h index 280dc265..37c3232b 100644 --- a/mindspore-lite/tools/optimizer/fusion/tile_matmul_fusion.h +++ 
b/mindspore-lite/tools/optimizer/fusion/tile_matmul_fusion.h @@ -21,7 +21,7 @@ #include "tools/optimizer/common/multiple_pattern_process_pass.h" #include "utils/check_convert_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class TileMatMulFusion : public Pass { public: @@ -33,5 +33,5 @@ class TileMatMulFusion : public Pass { bool CheckCanFuse(const FuncGraphPtr &func_graph, const AnfNodePtr &node) const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TILE_MATMUL_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/transpose_fusion.cc b/mindspore-lite/tools/optimizer/fusion/transpose_fusion.cc index 92676e2e..a02cd72f 100644 --- a/mindspore-lite/tools/optimizer/fusion/transpose_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/transpose_fusion.cc @@ -35,7 +35,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { bool IsBNCNode(const BaseRef &n) { if (utils::isa(n)) { auto anf_node = utils::cast(n); @@ -368,4 +368,4 @@ AnfNodePtr TransposeFusion::Process(const std::string &pattern_name, const minds manager->SetEdge(any_cnode, 1, transpose_cnode->input(1)); return trans_post_node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/fusion/transpose_fusion.h b/mindspore-lite/tools/optimizer/fusion/transpose_fusion.h index 6e9c44ab..a310449d 100644 --- a/mindspore-lite/tools/optimizer/fusion/transpose_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/transpose_fusion.h @@ -22,7 +22,7 @@ #include "include/backend/optimizer/optimizer.h" #include "tools/optimizer/common/multiple_pattern_process_pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class TransposeFusion : public MultiplePatternProcessPass { public: @@ -47,5 +47,5 @@ class TransposeFusion : public MultiplePatternProcessPass { int AdjustAxis(const mindspore::AnfNodePtr &node) const; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TRANSPOSE_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/fusion/transpose_gather_fusion.cc b/mindspore-lite/tools/optimizer/fusion/transpose_gather_fusion.cc index 1611a697..98e71246 100644 --- a/mindspore-lite/tools/optimizer/fusion/transpose_gather_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/transpose_gather_fusion.cc @@ -27,7 +27,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kPermMaxSize = 20; @@ -250,4 +250,4 @@ bool TransposeGatherFusion::CheckIsMatch(const std::vector &pre_perm, const return third_transform == align_transform; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/transpose_gather_fusion.h b/mindspore-lite/tools/optimizer/fusion/transpose_gather_fusion.h index c8042034..10418f32 100644 --- a/mindspore-lite/tools/optimizer/fusion/transpose_gather_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/transpose_gather_fusion.h @@ -21,7 +21,7 @@ #include #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { /* * The subgraph such as the 
following, in some times, the transpose-op can be fused. @@ -49,6 +49,6 @@ class TransposeGatherFusion : public Pass { std::vector gather_axes_data_ptr_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TRANSPOSE_GATHER_FUSION_H diff --git a/mindspore-lite/tools/optimizer/fusion/transpose_matmul_fusion.cc b/mindspore-lite/tools/optimizer/fusion/transpose_matmul_fusion.cc index b5f127d5..39bf94d8 100644 --- a/mindspore-lite/tools/optimizer/fusion/transpose_matmul_fusion.cc +++ b/mindspore-lite/tools/optimizer/fusion/transpose_matmul_fusion.cc @@ -29,7 +29,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { inline const std::vector kMatMulTransPerm1 = {0, 1, 3, 2}; @@ -119,4 +119,4 @@ bool TransposeMatMulFusion::Run(const FuncGraphPtr &func_graph) { return false; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/fusion/transpose_matmul_fusion.h b/mindspore-lite/tools/optimizer/fusion/transpose_matmul_fusion.h index 629f75d5..035955ab 100644 --- a/mindspore-lite/tools/optimizer/fusion/transpose_matmul_fusion.h +++ b/mindspore-lite/tools/optimizer/fusion/transpose_matmul_fusion.h @@ -21,7 +21,7 @@ #include "tools/converter/converter_context.h" #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class TransposeMatMulFusion : public Pass { public: @@ -30,5 +30,5 @@ class TransposeMatMulFusion : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_FUSION_TRANSPOSE_MATMUL_FUSION_H_ diff --git a/mindspore-lite/tools/optimizer/graph/add_tensor_array.cc b/mindspore-lite/tools/optimizer/graph/add_tensor_array.cc index e6a40b07..1b703008 100644 --- a/mindspore-lite/tools/optimizer/graph/add_tensor_array.cc +++ b/mindspore-lite/tools/optimizer/graph/add_tensor_array.cc @@ -33,7 +33,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { constexpr auto kDefaultIndex = 0; constexpr auto kInputNodeIndex = 1; constexpr auto kDefaultNumTensors = 1; @@ -237,4 +237,4 @@ const AnfNodePtr AddTensorArray::Process(const FuncGraphPtr &func_graph, const A return node; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/add_tensor_array.h b/mindspore-lite/tools/optimizer/graph/add_tensor_array.h index 39c72955..02c9cc36 100644 --- a/mindspore-lite/tools/optimizer/graph/add_tensor_array.h +++ b/mindspore-lite/tools/optimizer/graph/add_tensor_array.h @@ -21,7 +21,7 @@ #include "schema/inner/model_generated.h" #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class AddTensorArray : public LitePatternProcessPass { public: @@ -32,6 +32,6 @@ class AddTensorArray : public LitePatternProcessPass { const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &, const EquivPtr &) const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_ADD_TENSOR_ARRAY_H_ diff 
--git a/mindspore-lite/tools/optimizer/graph/add_variable_node_pass.cc b/mindspore-lite/tools/optimizer/graph/add_variable_node_pass.cc index ada4fe63..7739272e 100644 --- a/mindspore-lite/tools/optimizer/graph/add_variable_node_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/add_variable_node_pass.cc @@ -37,7 +37,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kInputSize3 = 3; @@ -719,4 +719,4 @@ bool InsertVariableNodePass::Run(const FuncGraphPtr &graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/add_variable_node_pass.h b/mindspore-lite/tools/optimizer/graph/add_variable_node_pass.h index ba623d67..d3e5a969 100644 --- a/mindspore-lite/tools/optimizer/graph/add_variable_node_pass.h +++ b/mindspore-lite/tools/optimizer/graph/add_variable_node_pass.h @@ -25,7 +25,7 @@ #include "tools/converter/cxx_api/converter_para.h" #include "include/errorcode.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class InsertVariableNodePass : public Pass { public: @@ -68,5 +68,5 @@ class InsertVariableNodePass : public Pass { std::shared_ptr param_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_ADD_VARIABLE_NODE_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/adjust_ascend_quant_pass.cc b/mindspore-lite/tools/optimizer/graph/adjust_ascend_quant_pass.cc index 4bc9cfb4..a599049d 100644 --- a/mindspore-lite/tools/optimizer/graph/adjust_ascend_quant_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/adjust_ascend_quant_pass.cc @@ -25,7 +25,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { bool AdjustAscendQuant(const FuncGraphPtr &func_graph, const CNodePtr &cnode) { @@ -104,4 +104,4 @@ bool AdjustAscendQunatPass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/adjust_ascend_quant_pass.h b/mindspore-lite/tools/optimizer/graph/adjust_ascend_quant_pass.h index 6d5b7905..959c898c 100644 --- a/mindspore-lite/tools/optimizer/graph/adjust_ascend_quant_pass.h +++ b/mindspore-lite/tools/optimizer/graph/adjust_ascend_quant_pass.h @@ -19,7 +19,7 @@ #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class AdjustAscendQunatPass : public Pass { public: @@ -28,6 +28,6 @@ class AdjustAscendQunatPass : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_ADJUST_ASCEND_QUANT_H diff --git a/mindspore-lite/tools/optimizer/graph/args_to_attr_pass.cc b/mindspore-lite/tools/optimizer/graph/args_to_attr_pass.cc index 1d021cdc..e73b7f4e 100644 --- a/mindspore-lite/tools/optimizer/graph/args_to_attr_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/args_to_attr_pass.cc @@ -22,7 +22,7 @@ #include "ops/op_def.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool ArgsToAttrPass::Run(const FuncGraphPtr 
&func_graph) { if (func_graph == nullptr) { @@ -107,4 +107,4 @@ bool ArgsToAttrPass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/args_to_attr_pass.h b/mindspore-lite/tools/optimizer/graph/args_to_attr_pass.h index 3f1eac2e..f959180e 100644 --- a/mindspore-lite/tools/optimizer/graph/args_to_attr_pass.h +++ b/mindspore-lite/tools/optimizer/graph/args_to_attr_pass.h @@ -23,7 +23,7 @@ #include #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class ArgsToAttrPass : public Pass { public: @@ -32,5 +32,5 @@ bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_ARGS_TO_ATTR_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/attr_to_args_pass.cc b/mindspore-lite/tools/optimizer/graph/attr_to_args_pass.cc index 63f14d2b..4fe0e1f5 100644 --- a/mindspore-lite/tools/optimizer/graph/attr_to_args_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/attr_to_args_pass.cc @@ -24,7 +24,7 @@ #include "utils/anf_utils.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { @@ -318,4 +318,4 @@ bool AttrToArgsPass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/attr_to_args_pass.h b/mindspore-lite/tools/optimizer/graph/attr_to_args_pass.h index ca369c37..382832ff 100644 --- a/mindspore-lite/tools/optimizer/graph/attr_to_args_pass.h +++ b/mindspore-lite/tools/optimizer/graph/attr_to_args_pass.h @@ -23,7 +23,7 @@ #include #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { // The dynamic op defined by yaml has changed the node attrs to args. 
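Every hunk in this part of the patch applies the same mechanical transformation: the outer namespace mindspore { block is reopened as the C++17 nested namespace definition namespace mindspore::lite {, with the trailing "// namespace" comments updated to match, while files that already used the fused form namespace mindspore::opt become namespace mindspore::lite::opt. A minimal compilable sketch of the pattern follows; PassNamespace() and FusedPassNamespace() are hypothetical helpers standing in for the real pass classes, and nested namespace definitions require C++17 or later.

#include <string>

// Before the patch: two nested namespace blocks, each closed with its own comment.
namespace mindspore {
namespace opt {
inline std::string PassNamespace() { return "mindspore::opt"; }
}  // namespace opt
}  // namespace mindspore

// After the patch: the outer level is opened as mindspore::lite (a C++17 nested
// namespace definition); the inner opt block is untouched, so only the opening
// line and the closing comment change.
namespace mindspore::lite {
namespace opt {
inline std::string PassNamespace() { return "mindspore::lite::opt"; }
}  // namespace opt
}  // namespace mindspore::lite

// Files that previously used the fused form collapse both levels into one definition.
namespace mindspore::lite::opt {
inline std::string FusedPassNamespace() { return "mindspore::lite::opt"; }
}  // namespace mindspore::lite::opt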
class AttrToArgsPass : public Pass { @@ -33,5 +33,5 @@ class AttrToArgsPass : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_ARGS_TO_ATTR_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/broadcast_for_select.cc b/mindspore-lite/tools/optimizer/graph/broadcast_for_select.cc index a800693c..1018a2d2 100644 --- a/mindspore-lite/tools/optimizer/graph/broadcast_for_select.cc +++ b/mindspore-lite/tools/optimizer/graph/broadcast_for_select.cc @@ -26,7 +26,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_b.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { ShapeVector GetSelectInputShape(const AnfNodePtr &input) { @@ -133,4 +133,4 @@ const AnfNodePtr BroadCastForSelect::Process(const FuncGraphPtr &graph, const An return out_node; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/broadcast_for_select.h b/mindspore-lite/tools/optimizer/graph/broadcast_for_select.h index 6f45875f..4a97de61 100644 --- a/mindspore-lite/tools/optimizer/graph/broadcast_for_select.h +++ b/mindspore-lite/tools/optimizer/graph/broadcast_for_select.h @@ -22,7 +22,7 @@ #include #include "tools/optimizer/common/pattern_process_pass_extends.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class BroadCastForSelect : public LitePatternProcessPass { public: @@ -32,5 +32,5 @@ class BroadCastForSelect : public LitePatternProcessPass { const AnfNodePtr Process(const FuncGraphPtr &graph, const AnfNodePtr &node, const EquivPtr &) const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_BROADCAST_FOR_SELECT_H_ diff --git a/mindspore-lite/tools/optimizer/graph/clip_convert_activation_pass.cc b/mindspore-lite/tools/optimizer/graph/clip_convert_activation_pass.cc index a9da5add..16dcff8f 100644 --- a/mindspore-lite/tools/optimizer/graph/clip_convert_activation_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/clip_convert_activation_pass.cc @@ -30,7 +30,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kClipMinIndex = 2; constexpr size_t kClipMaxIndex = 3; @@ -113,4 +113,4 @@ bool ClipConvertActivationPass::Run(const FuncGraphPtr &graph) { } return false; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/clip_convert_activation_pass.h b/mindspore-lite/tools/optimizer/graph/clip_convert_activation_pass.h index 0508a731..7a3736ed 100644 --- a/mindspore-lite/tools/optimizer/graph/clip_convert_activation_pass.h +++ b/mindspore-lite/tools/optimizer/graph/clip_convert_activation_pass.h @@ -19,7 +19,7 @@ #include #include "include/backend/optimizer/pass.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class ClipConvertActivationPass : public Pass { public: explicit ClipConvertActivationPass(bool only_relu = false) : Pass("clip_convert_activation_pass") { @@ -31,5 +31,5 @@ class ClipConvertActivationPass : public Pass { private: bool only_relu_ = false; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // 
MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_CLIP_CONVERT_ACTIVATION_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/concat_op_pass.cc b/mindspore-lite/tools/optimizer/graph/concat_op_pass.cc index ae207faf..240844b8 100644 --- a/mindspore-lite/tools/optimizer/graph/concat_op_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/concat_op_pass.cc @@ -34,7 +34,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { #if !defined(_WIN32) && !defined(_WIN64) CNodePtr CreateTupleGetItemNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, size_t output_idx) { @@ -230,4 +230,4 @@ bool ConcatOpPass::Run(const FuncGraphPtr &func_graph) { #else bool ConcatOpPass::Run(const FuncGraphPtr &func_graph) { return true; } #endif -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/concat_op_pass.h b/mindspore-lite/tools/optimizer/graph/concat_op_pass.h index 79da2dfe..1d2f60b6 100644 --- a/mindspore-lite/tools/optimizer/graph/concat_op_pass.h +++ b/mindspore-lite/tools/optimizer/graph/concat_op_pass.h @@ -21,7 +21,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class ConcatOpPass : public Pass { public: ConcatOpPass() : Pass("concat_op_pass") {} @@ -33,5 +33,5 @@ class ConcatOpPass : public Pass { STATUS AddDynamicInputSizeAttrForNode(const AnfNodePtr &anf_node); STATUS RunInsertSizeAttrPass(const FuncGraphPtr &func_graph, const FuncGraphManagerPtr &manager); }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_CONCAT_OP_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/control_flow_pass.cc b/mindspore-lite/tools/optimizer/graph/control_flow_pass.cc index 67764884..a4e48cd3 100644 --- a/mindspore-lite/tools/optimizer/graph/control_flow_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/control_flow_pass.cc @@ -36,7 +36,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_w.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { void ControlFlowPass::ReplaceNode(const FuncGraphPtr &fg, const std::unordered_map &replace_pairs) { for (auto &node : fg->nodes()) { @@ -818,4 +818,4 @@ bool ControlFlowPass::Run(const FuncGraphPtr &fg) { } return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/control_flow_pass.h b/mindspore-lite/tools/optimizer/graph/control_flow_pass.h index b83b82cb..68d29f66 100644 --- a/mindspore-lite/tools/optimizer/graph/control_flow_pass.h +++ b/mindspore-lite/tools/optimizer/graph/control_flow_pass.h @@ -24,7 +24,7 @@ #include "schema/inner/model_generated.h" #include "include/backend/optimizer/pass.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class ControlFlowPass : public Pass { public: ControlFlowPass() : Pass("control_flow_pass") {} @@ -94,5 +94,5 @@ class ControlFlowPass : public Pass { std::deque to_process_q{}; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif diff --git a/mindspore-lite/tools/optimizer/graph/core_infershape_pass.cc b/mindspore-lite/tools/optimizer/graph/core_infershape_pass.cc index 6c4722df..8632fd38 100644 --- a/mindspore-lite/tools/optimizer/graph/core_infershape_pass.cc +++ 
b/mindspore-lite/tools/optimizer/graph/core_infershape_pass.cc @@ -31,7 +31,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_w.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { int JudgeControlFlowCertainOutputHasInferred(const CNodePtr &return_cnode, size_t index, bool *infer_info) { @@ -405,4 +405,4 @@ int CoreInferShapePass::ResetSubGraphInput() { return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/core_infershape_pass.h b/mindspore-lite/tools/optimizer/graph/core_infershape_pass.h index 3bae5b56..c48bc634 100644 --- a/mindspore-lite/tools/optimizer/graph/core_infershape_pass.h +++ b/mindspore-lite/tools/optimizer/graph/core_infershape_pass.h @@ -25,7 +25,7 @@ #include "include/errorcode.h" #include "include/registry/converter_context.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { using mindspore::lite::STATUS; class CoreInferShapePass : public Pass { @@ -51,5 +51,5 @@ class CoreInferShapePass : public Pass { FuncGraphManagerPtr manager_{nullptr}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_CORE_INFERSHAPE_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/decrease_transpose_algo.cc b/mindspore-lite/tools/optimizer/graph/decrease_transpose_algo.cc index 8f073b2b..d5516063 100644 --- a/mindspore-lite/tools/optimizer/graph/decrease_transpose_algo.cc +++ b/mindspore-lite/tools/optimizer/graph/decrease_transpose_algo.cc @@ -39,7 +39,7 @@ #include "ir/tensor_new.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { std::function check_node = [](const CNodePtr &cnode) { @@ -908,4 +908,4 @@ bool DecreaseTransposeAlgo::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/decrease_transpose_algo.h b/mindspore-lite/tools/optimizer/graph/decrease_transpose_algo.h index f5a8ffc8..778ab012 100644 --- a/mindspore-lite/tools/optimizer/graph/decrease_transpose_algo.h +++ b/mindspore-lite/tools/optimizer/graph/decrease_transpose_algo.h @@ -29,7 +29,7 @@ #include "tools/optimizer/graph/transpose_strategy.h" using mindspore::converter::FmkType; -namespace mindspore { +namespace mindspore::lite { namespace opt { class DecreaseTransposeAlgo : public Pass { public: @@ -68,6 +68,6 @@ class DecreaseTransposeAlgo : public Pass { std::unordered_map> sub_inputs_map_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_DECREASE_TRANSPOSE_ALGO_H_ diff --git a/mindspore-lite/tools/optimizer/graph/dump_graph.h b/mindspore-lite/tools/optimizer/graph/dump_graph.h index ccb9682e..25151bc2 100644 --- a/mindspore-lite/tools/optimizer/graph/dump_graph.h +++ b/mindspore-lite/tools/optimizer/graph/dump_graph.h @@ -22,7 +22,7 @@ #include "include/registry/pass_base.h" #include "mindapi/ir/func_graph.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class DumpGraph : public registry::PassBase, public Pass { public: @@ -50,6 +50,6 @@ class DumpGraph : public registry::PassBase, public Pass { const std::shared_ptr ¶m_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // 
MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_DUMP_GRAPH_H_ diff --git a/mindspore-lite/tools/optimizer/graph/eliminate_redundant_cast_pass.cc b/mindspore-lite/tools/optimizer/graph/eliminate_redundant_cast_pass.cc index 250ff0a9..788cab2c 100644 --- a/mindspore-lite/tools/optimizer/graph/eliminate_redundant_cast_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/eliminate_redundant_cast_pass.cc @@ -20,7 +20,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { int EliminateRedundantCastPass::RemoveCastOp(const AnfNodePtr &anf_node, const FuncGraphManagerPtr &manager) { const int expected_cast_input_count = 3; auto cast_cnode = anf_node->cast(); @@ -75,4 +75,4 @@ bool EliminateRedundantCastPass::Run(const FuncGraphPtr &func_graph) { } return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/eliminate_redundant_cast_pass.h b/mindspore-lite/tools/optimizer/graph/eliminate_redundant_cast_pass.h index aab3a4d1..0d4ca324 100644 --- a/mindspore-lite/tools/optimizer/graph/eliminate_redundant_cast_pass.h +++ b/mindspore-lite/tools/optimizer/graph/eliminate_redundant_cast_pass.h @@ -20,7 +20,7 @@ #include "include/registry/converter_context.h" #include "include/backend/optimizer/pass.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { using mindspore::converter::FmkType; class EliminateRedundantCastPass : public Pass { public: @@ -37,5 +37,5 @@ class EliminateRedundantCastPass : public Pass { bool train_flag_{false}; std::set remove_cnode_; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_ELIMINATE_REDUNDANT_CAST_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/group_depthwise_op_convert_pass.cc b/mindspore-lite/tools/optimizer/graph/group_depthwise_op_convert_pass.cc index f0b49407..c70c77d8 100644 --- a/mindspore-lite/tools/optimizer/graph/group_depthwise_op_convert_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/group_depthwise_op_convert_pass.cc @@ -31,7 +31,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kConvWeightIndex = 2; constexpr size_t kConvInputIndex = 1; @@ -129,4 +129,4 @@ bool GroupDepthwiseOpConvertPass::Run(const FuncGraphPtr &graph) { } return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/group_depthwise_op_convert_pass.h b/mindspore-lite/tools/optimizer/graph/group_depthwise_op_convert_pass.h index 7098cd0f..7c8d344f 100644 --- a/mindspore-lite/tools/optimizer/graph/group_depthwise_op_convert_pass.h +++ b/mindspore-lite/tools/optimizer/graph/group_depthwise_op_convert_pass.h @@ -18,12 +18,12 @@ #include #include "include/backend/optimizer/pass.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class GroupDepthwiseOpConvertPass : public Pass { public: GroupDepthwiseOpConvertPass() : Pass("group_depthwise_op_convert_pass") {} ~GroupDepthwiseOpConvertPass() override = default; bool Run(const FuncGraphPtr &graph) override; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_GROUP_DEPTHWISE_OP_CONVERT_PASS_H_ diff --git 
a/mindspore-lite/tools/optimizer/graph/grouped_matmul_op_pass.cc b/mindspore-lite/tools/optimizer/graph/grouped_matmul_op_pass.cc index ef04f2c2..6eba34b6 100644 --- a/mindspore-lite/tools/optimizer/graph/grouped_matmul_op_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/grouped_matmul_op_pass.cc @@ -38,7 +38,7 @@ #include "ir/tensor_new.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { #if !defined(_WIN32) && !defined(_WIN64) const std::map> OpInputDtypeMap = {{prim::kPrimGroupedMatmul->name(), {{2, TypeId::kTypeUnknown}, @@ -382,4 +382,4 @@ bool GroupedMatmulOpPass::Run(const FuncGraphPtr &func_graph) { #else bool GroupedMatmulOpPass::Run(const FuncGraphPtr &func_graph) { return true; } #endif -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/grouped_matmul_op_pass.h b/mindspore-lite/tools/optimizer/graph/grouped_matmul_op_pass.h index b3edce56..943a6cdf 100644 --- a/mindspore-lite/tools/optimizer/graph/grouped_matmul_op_pass.h +++ b/mindspore-lite/tools/optimizer/graph/grouped_matmul_op_pass.h @@ -23,7 +23,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class GroupedMatmulOpPass : public Pass { public: GroupedMatmulOpPass() : Pass("grouped_matmul_op_pass") {} @@ -45,5 +45,5 @@ class GroupedMatmulOpPass : public Pass { bool IsNotSequenceOfTensor(const abstract::AbstractBasePtr &abs); AnfNodePtr ConvertMakeTupleInputToPlantInputs(const FuncGraphPtr &graph, const CNodePtr &cnode_ptr); }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_GROUPED_MATMUL_OP_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/infershape_pass.cc b/mindspore-lite/tools/optimizer/graph/infershape_pass.cc index c4e06e04..9f00765e 100644 --- a/mindspore-lite/tools/optimizer/graph/infershape_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/infershape_pass.cc @@ -30,7 +30,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_w.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { int GetCNodeCertainInputFormat(const CNodePtr cnode, int index, mindspore::Format *format) { @@ -500,4 +500,4 @@ int InferShapePass::ResetSubGraphInput() { return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/infershape_pass.h b/mindspore-lite/tools/optimizer/graph/infershape_pass.h index d3f2831f..5b2afede 100644 --- a/mindspore-lite/tools/optimizer/graph/infershape_pass.h +++ b/mindspore-lite/tools/optimizer/graph/infershape_pass.h @@ -24,7 +24,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/graph/node_infershape.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class InferShapePass : public Pass { public: @@ -54,5 +54,5 @@ class InferShapePass : public Pass { FuncGraphManagerPtr manager_{nullptr}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_INFERSHAPE_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/input_and_output_variable_pass.cc b/mindspore-lite/tools/optimizer/graph/input_and_output_variable_pass.cc index 5f79c591..5988faca 100644 --- 
a/mindspore-lite/tools/optimizer/graph/input_and_output_variable_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/input_and_output_variable_pass.cc @@ -31,7 +31,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "ir/tensor_new.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { bool InputAndOutputVariablePass::Run(const FuncGraphPtr &graph) { MS_LOG(INFO) << "Start to run input and output variable pass"; @@ -170,4 +170,4 @@ CNodePtr InputAndOutputVariablePass::CreateAssign(const AnfNodePtr &anf_node, co return cnode; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/input_and_output_variable_pass.h b/mindspore-lite/tools/optimizer/graph/input_and_output_variable_pass.h index c6d99ca3..30ddbe97 100644 --- a/mindspore-lite/tools/optimizer/graph/input_and_output_variable_pass.h +++ b/mindspore-lite/tools/optimizer/graph/input_and_output_variable_pass.h @@ -21,7 +21,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class InputAndOutputVariablePass : public Pass { public: InputAndOutputVariablePass(std::vector inputs_variable, std::vector outputs_variable) @@ -38,5 +38,5 @@ class InputAndOutputVariablePass : public Pass { std::vector inputs_variable_index_; std::vector outputs_variable_index_; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_INPUT_AND_OUTPUT_VARIABLE_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/input_data_type_trans_pass.cc b/mindspore-lite/tools/optimizer/graph/input_data_type_trans_pass.cc index c8b686ab..f88334ca 100644 --- a/mindspore-lite/tools/optimizer/graph/input_data_type_trans_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/input_data_type_trans_pass.cc @@ -31,7 +31,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_l.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_u.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kNotEqualMinIndex = 3; const std::vector kFloatDataType = {kNumberTypeFloat, kNumberTypeFloat16, kNumberTypeFloat32, @@ -147,4 +147,4 @@ bool InOutDTypeTransPass::Run(const FuncGraphPtr &graph) { } return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/input_data_type_trans_pass.h b/mindspore-lite/tools/optimizer/graph/input_data_type_trans_pass.h index 33a7251d..28b360cd 100644 --- a/mindspore-lite/tools/optimizer/graph/input_data_type_trans_pass.h +++ b/mindspore-lite/tools/optimizer/graph/input_data_type_trans_pass.h @@ -22,7 +22,7 @@ #include "tools/optimizer/common/gllo_utils.h" #include "include/api/types.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class InOutDTypeTransPass : public Pass { public: explicit InOutDTypeTransPass(DataType dst_input_data_type, DataType dst_output_data_type) @@ -38,5 +38,5 @@ class InOutDTypeTransPass : public Pass { TypeId dst_input_data_type_ = kTypeUnknown; TypeId dst_output_data_type_ = kTypeUnknown; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_INPUT_DATA_TYPE_TRANS_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/int64_cast_int32_pass.cc b/mindspore-lite/tools/optimizer/graph/int64_cast_int32_pass.cc index fc3f1a8e..8769b410 100644 --- 
a/mindspore-lite/tools/optimizer/graph/int64_cast_int32_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/int64_cast_int32_pass.cc @@ -36,7 +36,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kNotEqualMinIndex = 3; } // namespace @@ -151,4 +151,4 @@ bool Int64CastInt32Pass::Run(const FuncGraphPtr &graph) { } return change_flag; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/int64_cast_int32_pass.h b/mindspore-lite/tools/optimizer/graph/int64_cast_int32_pass.h index 8413d080..1d3a5177 100644 --- a/mindspore-lite/tools/optimizer/graph/int64_cast_int32_pass.h +++ b/mindspore-lite/tools/optimizer/graph/int64_cast_int32_pass.h @@ -19,7 +19,7 @@ #include #include "include/backend/optimizer/pass.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class Int64CastInt32Pass : public Pass { public: Int64CastInt32Pass() : Pass("int64_cast_int32_pass") {} @@ -29,5 +29,5 @@ class Int64CastInt32Pass : public Pass { private: bool NotEqualInputsCheck(const CNodePtr &cnode); }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_INT64_CAST_INT32_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/kvcache_quant_pass.cc b/mindspore-lite/tools/optimizer/graph/kvcache_quant_pass.cc index d1478a22..ebe47a9a 100644 --- a/mindspore-lite/tools/optimizer/graph/kvcache_quant_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/kvcache_quant_pass.cc @@ -58,7 +58,7 @@ improve the inference performance */ -namespace mindspore::opt { +namespace mindspore::lite::opt { CNodePtr KVCacheQuantPass::NewQuantNode(const FuncGraphPtr &func_graph, const AnfNodePtr &input_node, TypeId dst_type) { MS_EXCEPTION_IF_NULL(func_graph); MS_EXCEPTION_IF_NULL(input_node); @@ -258,4 +258,4 @@ bool KVCacheQuantPass::Run(const FuncGraphPtr &func_graph) { MS_CHECK_TRUE_RET(status != lite::RET_ERROR, false); return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/kvcache_quant_pass.h b/mindspore-lite/tools/optimizer/graph/kvcache_quant_pass.h index 02ce94d3..1e3cdab4 100644 --- a/mindspore-lite/tools/optimizer/graph/kvcache_quant_pass.h +++ b/mindspore-lite/tools/optimizer/graph/kvcache_quant_pass.h @@ -21,7 +21,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class KVCacheQuantPass : public Pass { public: KVCacheQuantPass() : Pass("kvcache_quant_pass") {} @@ -39,5 +39,5 @@ class KVCacheQuantPass : public Pass { STATUS RunQuantPass(const FuncGraphPtr &func_graph, const FuncGraphManagerPtr &manager); STATUS RunAntiQuantPass(const FuncGraphPtr &func_graph, const FuncGraphManagerPtr &manager); }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_KVCACHE_QUANT_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/lite_tensor_extractor.cc b/mindspore-lite/tools/optimizer/graph/lite_tensor_extractor.cc index 27409afd..7cee54b0 100644 --- a/mindspore-lite/tools/optimizer/graph/lite_tensor_extractor.cc +++ b/mindspore-lite/tools/optimizer/graph/lite_tensor_extractor.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include 
"mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr int kElementShapeIndex = 1; @@ -514,4 +514,4 @@ int LiteTensorExtractor::GetCNodeOutputTensors(const CNodePtr &cnode, std::vecto return RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/lite_tensor_extractor.h b/mindspore-lite/tools/optimizer/graph/lite_tensor_extractor.h index 62cc27d3..87f79925 100644 --- a/mindspore-lite/tools/optimizer/graph/lite_tensor_extractor.h +++ b/mindspore-lite/tools/optimizer/graph/lite_tensor_extractor.h @@ -23,7 +23,7 @@ #include "src/tensor.h" #include "tools/lite_exporter/fetch_content.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class LiteTensorExtractor { public: @@ -42,5 +42,5 @@ class LiteTensorExtractor { converter::FmkType fmk_type, bool train_flag); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_LITE_TENSOR_EXTRACTOR_H_ diff --git a/mindspore-lite/tools/optimizer/graph/make_list_pass.cc b/mindspore-lite/tools/optimizer/graph/make_list_pass.cc index 99456d29..9728f5ff 100644 --- a/mindspore-lite/tools/optimizer/graph/make_list_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/make_list_pass.cc @@ -29,7 +29,7 @@ #include "mindspore/core/include/utils/trace_info.h" #include "mindspore/core/include/ir/scope.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { // From // MakeList(arg1, arg2, ...) // To @@ -312,4 +312,4 @@ bool MakeListPass::Run(const FuncGraphPtr &func_graph) { } return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/make_list_pass.h b/mindspore-lite/tools/optimizer/graph/make_list_pass.h index e63f2dc8..abf9eb39 100644 --- a/mindspore-lite/tools/optimizer/graph/make_list_pass.h +++ b/mindspore-lite/tools/optimizer/graph/make_list_pass.h @@ -21,7 +21,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class MakeListPass : public Pass { public: MakeListPass() : Pass("make_list_pass") {} @@ -41,5 +41,5 @@ class MakeListPass : public Pass { AnfNodePtr ConvertListGetItemToTupleGetItem(const CNodePtr &node); AnfNodePtr ConvertMakeListToMakeTuple(const CNodePtr &node); }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_SCALAR_OP_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/miniaturization_pass.cc b/mindspore-lite/tools/optimizer/graph/miniaturization_pass.cc index 35e35075..88cc4f37 100644 --- a/mindspore-lite/tools/optimizer/graph/miniaturization_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/miniaturization_pass.cc @@ -27,7 +27,7 @@ #include "utils/check_convert_utils.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { static inline tensor::TensorPtr GetTensorFromNode(const AnfNodePtr &node) { MS_ASSERT(node != nullptr); auto value_node = node->cast(); @@ -127,4 +127,4 @@ bool MiniaturizationPass::ProcessOneCNode(const FuncGraphPtr &func_graph, const } return changed; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/miniaturization_pass.h 
b/mindspore-lite/tools/optimizer/graph/miniaturization_pass.h index 96f94d4a..b55a3840 100644 --- a/mindspore-lite/tools/optimizer/graph/miniaturization_pass.h +++ b/mindspore-lite/tools/optimizer/graph/miniaturization_pass.h @@ -18,7 +18,7 @@ #define MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_MINIATURIZATION_PASS_H #include "include/backend/optimizer/pass.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class MiniaturizationPass : public Pass { public: MiniaturizationPass() : Pass("MiniaturizationPass") {} @@ -32,6 +32,6 @@ class MiniaturizationPass : public Pass { bool ProcessOneCNode(const FuncGraphPtr &func_graph, const CNodePtr &cnode); static const ssize_t COMPRESS_TRIGGER_SIZE_ = 512; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_MINIATURIZATION_PASS_H diff --git a/mindspore-lite/tools/optimizer/graph/mul_constant_pass.cc b/mindspore-lite/tools/optimizer/graph/mul_constant_pass.cc index 3588f628..f5da7d46 100644 --- a/mindspore-lite/tools/optimizer/graph/mul_constant_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/mul_constant_pass.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr int kMulInputSize = 3; @@ -111,4 +111,4 @@ bool MulConstantPass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/mul_constant_pass.h b/mindspore-lite/tools/optimizer/graph/mul_constant_pass.h index 4938f40e..20d32dcc 100644 --- a/mindspore-lite/tools/optimizer/graph/mul_constant_pass.h +++ b/mindspore-lite/tools/optimizer/graph/mul_constant_pass.h @@ -18,7 +18,7 @@ #define MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_MUL_CONSTANT_PASS_H_ #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class MulConstantPass : public Pass { public: @@ -27,5 +27,5 @@ class MulConstantPass : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_MUL_CONSTANT_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/node_infershape.cc b/mindspore-lite/tools/optimizer/graph/node_infershape.cc index 3844b8aa..191464fb 100644 --- a/mindspore-lite/tools/optimizer/graph/node_infershape.cc +++ b/mindspore-lite/tools/optimizer/graph/node_infershape.cc @@ -59,7 +59,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_z.h" #include "ir/tensor_new.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { static const std::unordered_set kNNACLToOpsInfer = { // arithmetic_self @@ -689,4 +689,4 @@ abstract::AbstractBasePtr NodeInferShape::ConvertTensorListToAbstract(lite::Tens return tensor_list_abstract; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/node_infershape.h b/mindspore-lite/tools/optimizer/graph/node_infershape.h index 7862279b..e27b56b6 100644 --- a/mindspore-lite/tools/optimizer/graph/node_infershape.h +++ b/mindspore-lite/tools/optimizer/graph/node_infershape.h @@ -27,7 +27,7 @@ #include "tools/optimizer/common/format_utils.h" using mindspore::converter::FmkType; -namespace mindspore { +namespace mindspore::lite { namespace opt { class 
NodeInferShape { public: @@ -59,6 +59,6 @@ class NodeInferShape { bool train_flag_{false}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_NODE_INFERSHAPE_H_ diff --git a/mindspore-lite/tools/optimizer/graph/output_variable_pass.cc b/mindspore-lite/tools/optimizer/graph/output_variable_pass.cc index 6c1cd583..6f0a8167 100644 --- a/mindspore-lite/tools/optimizer/graph/output_variable_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/output_variable_pass.cc @@ -31,7 +31,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "ir/tensor_new.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr size_t kInputNumber = 2; } @@ -157,4 +157,4 @@ bool OutputVariablePass::CreateDependNode(const FuncGraphPtr &graph) { graph->set_output(depend_node); return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/output_variable_pass.h b/mindspore-lite/tools/optimizer/graph/output_variable_pass.h index 7519eee9..910fdb97 100644 --- a/mindspore-lite/tools/optimizer/graph/output_variable_pass.h +++ b/mindspore-lite/tools/optimizer/graph/output_variable_pass.h @@ -21,7 +21,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class OutputVariablePass : public Pass { public: explicit OutputVariablePass(const std::vector &output_variable) : Pass("OutputVariablePass") { @@ -35,5 +35,5 @@ class OutputVariablePass : public Pass { std::vector assign_nodes_; std::vector outputs_variable_index_; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_OUTPUT_VARIABLE_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/padv3_ge_pass.cc b/mindspore-lite/tools/optimizer/graph/padv3_ge_pass.cc index fffef2f5..458882ff 100644 --- a/mindspore-lite/tools/optimizer/graph/padv3_ge_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/padv3_ge_pass.cc @@ -26,7 +26,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_p.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { /* In MindSpore, padding order starts from the last dimension and goes backward (same as PyTorch), but GE padding order @@ -258,4 +258,4 @@ bool PadV3GePass::Run(const FuncGraphPtr &func_graph) { MS_LOG(INFO) << "run padv3 pass success!"; return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/padv3_ge_pass.h b/mindspore-lite/tools/optimizer/graph/padv3_ge_pass.h index ddfd523a..b38304d5 100644 --- a/mindspore-lite/tools/optimizer/graph/padv3_ge_pass.h +++ b/mindspore-lite/tools/optimizer/graph/padv3_ge_pass.h @@ -22,7 +22,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class PadV3GePass : public Pass { public: PadV3GePass() : Pass("padv3_ge_pass") {} @@ -41,5 +41,5 @@ class PadV3GePass : public Pass { const CNodePtr CreateConcatNode(const FuncGraphPtr &func_graph, const std::vector &concat_input_vec, std::string concat_node_name); }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_PADV3_GE_PASS_H_ diff --git 
a/mindspore-lite/tools/optimizer/graph/preprocess_dynamic_shape.cc b/mindspore-lite/tools/optimizer/graph/preprocess_dynamic_shape.cc index b08d40f8..a4bd0ba2 100644 --- a/mindspore-lite/tools/optimizer/graph/preprocess_dynamic_shape.cc +++ b/mindspore-lite/tools/optimizer/graph/preprocess_dynamic_shape.cc @@ -42,7 +42,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { int DoStack(const CNodePtr &cnode, const ShapeVector &out_shape, ShapeVector *out_data) { @@ -892,4 +892,4 @@ int DynamicShapePreprocessor::DoInfer(const CNodePtr &cnode, const std::string & return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/preprocess_dynamic_shape.h b/mindspore-lite/tools/optimizer/graph/preprocess_dynamic_shape.h index 20846abe..3f3c3cdd 100644 --- a/mindspore-lite/tools/optimizer/graph/preprocess_dynamic_shape.h +++ b/mindspore-lite/tools/optimizer/graph/preprocess_dynamic_shape.h @@ -24,7 +24,7 @@ #include "ir/anf.h" #include "mindapi/base/shape_vector.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class DynamicShapePreprocessor { typedef std::map, std::vector>> ShapeContainer; @@ -42,6 +42,6 @@ class DynamicShapePreprocessor { ShapeContainer op_shape_infos_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_PREPROCESS_DYNAMIC_SHAPE_H diff --git a/mindspore-lite/tools/optimizer/graph/quant_fusion_x_offset_to_bias_pass.cc b/mindspore-lite/tools/optimizer/graph/quant_fusion_x_offset_to_bias_pass.cc index 2b61b63c..6592db83 100644 --- a/mindspore-lite/tools/optimizer/graph/quant_fusion_x_offset_to_bias_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/quant_fusion_x_offset_to_bias_pass.cc @@ -26,7 +26,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_q.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { constexpr auto kAttrNameOffset = "offset"; constexpr auto kAttrNameTransposeB = "transpose_b"; @@ -134,4 +134,4 @@ bool QuantFusionXOffsetToBias::Run(const FuncGraphPtr &func_graph) { MS_CHECK_TRUE_RET(ret == lite::RET_OK, false); return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/quant_fusion_x_offset_to_bias_pass.h b/mindspore-lite/tools/optimizer/graph/quant_fusion_x_offset_to_bias_pass.h index 37bc3cb7..96982ce9 100644 --- a/mindspore-lite/tools/optimizer/graph/quant_fusion_x_offset_to_bias_pass.h +++ b/mindspore-lite/tools/optimizer/graph/quant_fusion_x_offset_to_bias_pass.h @@ -21,7 +21,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class QuantFusionXOffsetToBias : public Pass { public: QuantFusionXOffsetToBias() : Pass("quant_fusion_x_offset_to_bias_pass") {} @@ -33,5 +33,5 @@ class QuantFusionXOffsetToBias : public Pass { float x_offset, const tensor::TensorPtr weight, bool transpose); STATUS RunQuantFusionXOffsetToBias(const FuncGraphPtr &func_graph, const FuncGraphManagerPtr &manager); }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_QUANT_FUSION_X_OFFSET_TO_BIAS_PASS_H_ diff --git 
a/mindspore-lite/tools/optimizer/graph/reduce_same_act_pass.cc b/mindspore-lite/tools/optimizer/graph/reduce_same_act_pass.cc index 4bc5a611..ff6c8fa8 100644 --- a/mindspore-lite/tools/optimizer/graph/reduce_same_act_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/reduce_same_act_pass.cc @@ -25,7 +25,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kMinUsersSize = 2; @@ -78,4 +78,4 @@ bool ReduceSameActPass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/reduce_same_act_pass.h b/mindspore-lite/tools/optimizer/graph/reduce_same_act_pass.h index 5411d251..8063243e 100644 --- a/mindspore-lite/tools/optimizer/graph/reduce_same_act_pass.h +++ b/mindspore-lite/tools/optimizer/graph/reduce_same_act_pass.h @@ -27,7 +27,7 @@ #include "tools/optimizer/graph/transpose_strategy.h" using mindspore::converter::FmkType; -namespace mindspore { +namespace mindspore::lite { namespace opt { class ReduceSameActPass : public Pass { public: @@ -36,6 +36,6 @@ class ReduceSameActPass : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_REDUCE_SAME_ACT_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/redundant_op_remove_pass.cc b/mindspore-lite/tools/optimizer/graph/redundant_op_remove_pass.cc index a753b60c..74070c78 100644 --- a/mindspore-lite/tools/optimizer/graph/redundant_op_remove_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/redundant_op_remove_pass.cc @@ -43,7 +43,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_w.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { const size_t kIndexNum = 2; int ReplaceUpdateStateWithMonad(const FuncGraphPtr &func_graph, const CNodePtr &cnode, bool remove_side_effect) { @@ -611,4 +611,4 @@ bool RemoveRedundantOpPass::Run(const FuncGraphPtr &func_graph) { remove_cnode_.clear(); return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/redundant_op_remove_pass.h b/mindspore-lite/tools/optimizer/graph/redundant_op_remove_pass.h index 02fe29b8..e9f69aea 100644 --- a/mindspore-lite/tools/optimizer/graph/redundant_op_remove_pass.h +++ b/mindspore-lite/tools/optimizer/graph/redundant_op_remove_pass.h @@ -23,7 +23,7 @@ #include "tools/lite_exporter/fetch_content.h" using mindspore::converter::FmkType; -namespace mindspore::opt { +namespace mindspore::lite::opt { class RemoveRedundantOpPass : public Pass { public: explicit RemoveRedundantOpPass(bool is_train_model) @@ -54,5 +54,5 @@ class RemoveRedundantOpPass : public Pass { bool keep_update_state_ = false; std::set remove_cnode_; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_REDUNDANT_OP_REMOVE_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/remove_load_pass.cc b/mindspore-lite/tools/optimizer/graph/remove_load_pass.cc index 0507bb54..0f5a5dae 100644 --- a/mindspore-lite/tools/optimizer/graph/remove_load_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/remove_load_pass.cc @@ -19,7 +19,7 @@ #include 
"mindspore/ops/op_def/auto_generate/gen_ops_primitive_a.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_l.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { bool RemoveLoadPass::Run(const mindspore::FuncGraphPtr &func_graph) { // Remove Load Node auto mng = func_graph->manager(); @@ -59,4 +59,4 @@ bool RemoveLoadPass::Run(const mindspore::FuncGraphPtr &func_graph) { } return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/remove_load_pass.h b/mindspore-lite/tools/optimizer/graph/remove_load_pass.h index 6cb81106..390022cf 100644 --- a/mindspore-lite/tools/optimizer/graph/remove_load_pass.h +++ b/mindspore-lite/tools/optimizer/graph/remove_load_pass.h @@ -17,12 +17,12 @@ #ifndef MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_REMOVE_LOAD_PASS_H #define MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_REMOVE_LOAD_PASS_H #include "include/backend/optimizer/pass.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class RemoveLoadPass : public Pass { public: RemoveLoadPass() : Pass("RemoveLoadPass") {} ~RemoveLoadPass() override = default; bool Run(const FuncGraphPtr &func_graph) override; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_REMOVE_LOAD_PASS_H diff --git a/mindspore-lite/tools/optimizer/graph/scalar_op_pass.cc b/mindspore-lite/tools/optimizer/graph/scalar_op_pass.cc index 3e217b5f..23f7d7bf 100644 --- a/mindspore-lite/tools/optimizer/graph/scalar_op_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/scalar_op_pass.cc @@ -89,7 +89,7 @@ remove both, the Tensor are connected. ############### */ -namespace mindspore::opt { +namespace mindspore::lite::opt { /* This function returns the index of the input node, which is used by the user node. 
*/ @@ -649,4 +649,4 @@ bool ScalarOpPass::Run(const FuncGraphPtr &func_graph) { } return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/scalar_op_pass.h b/mindspore-lite/tools/optimizer/graph/scalar_op_pass.h index 79487271..ef983b4d 100644 --- a/mindspore-lite/tools/optimizer/graph/scalar_op_pass.h +++ b/mindspore-lite/tools/optimizer/graph/scalar_op_pass.h @@ -21,7 +21,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class ScalarOpPass : public Pass { public: ScalarOpPass() : Pass("scalar_op_pass") {} @@ -53,5 +53,5 @@ class ScalarOpPass : public Pass { STATUS RunRemoveTensorToScalarPass(const FuncGraphPtr &func_graph, const FuncGraphManagerPtr &manager); STATUS RunArithmeticCheckPass(const FuncGraphPtr &func_graph, const FuncGraphManagerPtr &manager); }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_SCALAR_OP_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/send_op_add_control_depend.cc b/mindspore-lite/tools/optimizer/graph/send_op_add_control_depend.cc index c958e6a4..cbf81cb7 100644 --- a/mindspore-lite/tools/optimizer/graph/send_op_add_control_depend.cc +++ b/mindspore-lite/tools/optimizer/graph/send_op_add_control_depend.cc @@ -22,7 +22,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "ir/tensor_new.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { const auto kDataToControl = "data_to_control"; @@ -76,4 +76,4 @@ const AnfNodePtr SendOpAddControlDepend::Process(const FuncGraphPtr &func_graph, } #endif } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/send_op_add_control_depend.h b/mindspore-lite/tools/optimizer/graph/send_op_add_control_depend.h index 8d7cfb7a..614d28aa 100644 --- a/mindspore-lite/tools/optimizer/graph/send_op_add_control_depend.h +++ b/mindspore-lite/tools/optimizer/graph/send_op_add_control_depend.h @@ -26,7 +26,7 @@ #include "tools/optimizer/common/pattern_process_pass_extends.h" #include "mindspore/ops/op_def/nn_optimizer_ops.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class SendOpAddControlDepend : public opt::LitePatternProcessPass { public: @@ -38,5 +38,5 @@ class SendOpAddControlDepend : public opt::LitePatternProcessPass { const AnfNodePtr Process(const FuncGraphPtr &, const AnfNodePtr &node, const EquivPtr &) const override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_SEND_OP_ADD_CONTROL_DEPEND_H_ diff --git a/mindspore-lite/tools/optimizer/graph/slice_prepose_pass.cc b/mindspore-lite/tools/optimizer/graph/slice_prepose_pass.cc index 3ac929ac..e8711c0f 100644 --- a/mindspore-lite/tools/optimizer/graph/slice_prepose_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/slice_prepose_pass.cc @@ -41,7 +41,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { const int kArithmeticInputNum = 2; const int SliceBeginIndex = 2; @@ -1564,4 +1564,4 @@ bool SlicePreposePass::Run(const FuncGraphPtr &graph) { } return changed; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git 
a/mindspore-lite/tools/optimizer/graph/slice_prepose_pass.h b/mindspore-lite/tools/optimizer/graph/slice_prepose_pass.h index 33736f55..dec73e11 100644 --- a/mindspore-lite/tools/optimizer/graph/slice_prepose_pass.h +++ b/mindspore-lite/tools/optimizer/graph/slice_prepose_pass.h @@ -26,7 +26,7 @@ #include "include/registry/converter_context.h" using mindspore::converter::FmkType; -namespace mindspore::opt { +namespace mindspore::lite::opt { using lite::RET_ERROR; using lite::RET_OK; using lite::STATUS; @@ -95,6 +95,6 @@ class SlicePreposePass : public Pass { private: FmkType fmk_type = converter::kFmkTypeOnnx; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_SLICE_PREPOSE_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/special_node_postprocess.cc b/mindspore-lite/tools/optimizer/graph/special_node_postprocess.cc index 08d26b72..2b72a9a6 100644 --- a/mindspore-lite/tools/optimizer/graph/special_node_postprocess.cc +++ b/mindspore-lite/tools/optimizer/graph/special_node_postprocess.cc @@ -32,7 +32,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_w.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { const PrimitivePtr kPrimInstanceNorm = std::make_shared("InstanceNorm"); @@ -175,4 +175,4 @@ int SpecialNodePostProcess::HandleInstanceNorm(const FuncGraphPtr &func_graph, c return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/special_node_postprocess.h b/mindspore-lite/tools/optimizer/graph/special_node_postprocess.h index 56a753b3..a6355a46 100644 --- a/mindspore-lite/tools/optimizer/graph/special_node_postprocess.h +++ b/mindspore-lite/tools/optimizer/graph/special_node_postprocess.h @@ -19,7 +19,7 @@ #include "include/backend/optimizer/pass.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class SpecialNodePostProcess : public Pass { public: @@ -32,6 +32,6 @@ class SpecialNodePostProcess : public Pass { int HandleInstanceNorm(const FuncGraphPtr &func_graph, const CNodePtr &cnode); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_SPECIAL_NODE_POSTPROCESS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/specify_graph_input_format.cc b/mindspore-lite/tools/optimizer/graph/specify_graph_input_format.cc index 5b73b8bf..1bac6f65 100644 --- a/mindspore-lite/tools/optimizer/graph/specify_graph_input_format.cc +++ b/mindspore-lite/tools/optimizer/graph/specify_graph_input_format.cc @@ -31,7 +31,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_r.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool SpecifyGraphInputFormat::Run(const FuncGraphPtr &graph) { MS_CHECK_TRUE_MSG(graph != nullptr, false, "graph is nullptr!"); @@ -232,4 +232,4 @@ bool SpecifyGraphInputFormat::GetCurGraphInputFormat(const FuncGraphPtr &func_gr return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/specify_graph_input_format.h b/mindspore-lite/tools/optimizer/graph/specify_graph_input_format.h index 12d51a4c..0c84d5bb 100644 --- a/mindspore-lite/tools/optimizer/graph/specify_graph_input_format.h +++ 
b/mindspore-lite/tools/optimizer/graph/specify_graph_input_format.h @@ -23,7 +23,7 @@ #include "include/api/types.h" #include "include/registry/converter_context.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class SpecifyGraphInputFormat : public Pass { public: @@ -44,6 +44,6 @@ class SpecifyGraphInputFormat : public Pass { mindspore::Format cur_graph_input_format_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_SPECIFY_GRAPH_INPUT_FORMAT_H_ diff --git a/mindspore-lite/tools/optimizer/graph/specify_graph_output_format.cc b/mindspore-lite/tools/optimizer/graph/specify_graph_output_format.cc index 18debeb7..8d396bea 100644 --- a/mindspore-lite/tools/optimizer/graph/specify_graph_output_format.cc +++ b/mindspore-lite/tools/optimizer/graph/specify_graph_output_format.cc @@ -33,7 +33,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_d.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool SpecifyGraphOutputFormat::Run(const FuncGraphPtr &graph) { MS_ASSERT(graph != nullptr); @@ -136,4 +136,4 @@ STATUS SpecifyGraphOutputFormat::HandleGraphOutput(const FuncGraphPtr &graph) { return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/specify_graph_output_format.h b/mindspore-lite/tools/optimizer/graph/specify_graph_output_format.h index 1b75110c..b154c125 100644 --- a/mindspore-lite/tools/optimizer/graph/specify_graph_output_format.h +++ b/mindspore-lite/tools/optimizer/graph/specify_graph_output_format.h @@ -23,7 +23,7 @@ #include "include/api/types.h" #include "include/registry/converter_context.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class SpecifyGraphOutputFormat : public Pass { public: @@ -37,6 +37,6 @@ class SpecifyGraphOutputFormat : public Pass { mindspore::Format exp_graph_output_format_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_SPECIFY_GRAPH_OUTPUT_FORMAT_H_ diff --git a/mindspore-lite/tools/optimizer/graph/split_one_pass.cc b/mindspore-lite/tools/optimizer/graph/split_one_pass.cc index 558dd11a..6e790ec7 100644 --- a/mindspore-lite/tools/optimizer/graph/split_one_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/split_one_pass.cc @@ -27,7 +27,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kMinCnodeSize = 2; @@ -79,4 +79,4 @@ bool SplitOnePass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/split_one_pass.h b/mindspore-lite/tools/optimizer/graph/split_one_pass.h index 408ceb16..f6bb62ce 100644 --- a/mindspore-lite/tools/optimizer/graph/split_one_pass.h +++ b/mindspore-lite/tools/optimizer/graph/split_one_pass.h @@ -27,7 +27,7 @@ #include "tools/optimizer/graph/transpose_strategy.h" using mindspore::converter::FmkType; -namespace mindspore { +namespace mindspore::lite { namespace opt { class SplitOnePass : public Pass { public: @@ -36,5 +36,5 @@ class SplitOnePass : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace 
mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_SPLIT_ONE_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/split_with_size_op_pass.cc b/mindspore-lite/tools/optimizer/graph/split_with_size_op_pass.cc index 5160acdb..27646f3f 100644 --- a/mindspore-lite/tools/optimizer/graph/split_with_size_op_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/split_with_size_op_pass.cc @@ -33,7 +33,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { AnfNodePtr SplitWithSizeOpPass::SplitWithSizeMapperToSplitV(const FuncGraphPtr &func_graph, const CNodePtr &cnode) { MS_EXCEPTION_IF_NULL(cnode); @@ -101,4 +101,4 @@ bool SplitWithSizeOpPass::Run(const FuncGraphPtr &func_graph) { MS_CHECK_TRUE_RET(status != lite::RET_ERROR, false); return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/split_with_size_op_pass.h b/mindspore-lite/tools/optimizer/graph/split_with_size_op_pass.h index c74019ac..7d60e87b 100644 --- a/mindspore-lite/tools/optimizer/graph/split_with_size_op_pass.h +++ b/mindspore-lite/tools/optimizer/graph/split_with_size_op_pass.h @@ -22,7 +22,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class SplitWithSizeOpPass : public Pass { public: SplitWithSizeOpPass() : Pass("split_with_size_op_pass") {} @@ -38,5 +38,5 @@ class SplitWithSizeOpPass : public Pass { const std::string kAttrNameSizeSplits = "size_splits"; const std::string kAttrNameSplitDim = "split_dim"; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_SPLIT_WITH_SIZE_OP_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/transpose_strategy.cc b/mindspore-lite/tools/optimizer/graph/transpose_strategy.cc index afccb911..1ae85f31 100644 --- a/mindspore-lite/tools/optimizer/graph/transpose_strategy.cc +++ b/mindspore-lite/tools/optimizer/graph/transpose_strategy.cc @@ -41,7 +41,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_s.h" #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kFirstInput = 1; @@ -580,4 +580,4 @@ void TransposeStrategy::DecidePreAndPostTransType(const TransTypePair *trans_inf } } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/transpose_strategy.h b/mindspore-lite/tools/optimizer/graph/transpose_strategy.h index c4ac9eb7..063b651f 100644 --- a/mindspore-lite/tools/optimizer/graph/transpose_strategy.h +++ b/mindspore-lite/tools/optimizer/graph/transpose_strategy.h @@ -25,7 +25,7 @@ #include "tools/optimizer/graph/node_infershape.h" using mindspore::converter::FmkType; -namespace mindspore { +namespace mindspore::lite { namespace opt { class TransposeStrategy { public: @@ -54,6 +54,6 @@ class TransposeStrategy { NodeInferShape node_infer_shape_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_TRANSPOSE_STRATEGY_H_ diff --git a/mindspore-lite/tools/optimizer/graph/unused_add_node_remove_pass.cc b/mindspore-lite/tools/optimizer/graph/unused_add_node_remove_pass.cc index 7c16ab2f..4e2485a6 100644 --- 
a/mindspore-lite/tools/optimizer/graph/unused_add_node_remove_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/unused_add_node_remove_pass.cc @@ -30,7 +30,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_m.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { constexpr size_t kAddInputSize = 3; @@ -107,4 +107,4 @@ bool RemoveUnusedAddNodePass::Run(const FuncGraphPtr &func_graph) { return true; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/graph/unused_add_node_remove_pass.h b/mindspore-lite/tools/optimizer/graph/unused_add_node_remove_pass.h index 249a9c9d..1cc8c35f 100644 --- a/mindspore-lite/tools/optimizer/graph/unused_add_node_remove_pass.h +++ b/mindspore-lite/tools/optimizer/graph/unused_add_node_remove_pass.h @@ -27,7 +27,7 @@ #include "tools/optimizer/graph/transpose_strategy.h" using mindspore::converter::FmkType; -namespace mindspore { +namespace mindspore::lite { namespace opt { class RemoveUnusedAddNodePass : public Pass { public: @@ -36,6 +36,6 @@ class RemoveUnusedAddNodePass : public Pass { bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_REMOVE_ADD_0_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/unused_transpose_node_remove_pass.cc b/mindspore-lite/tools/optimizer/graph/unused_transpose_node_remove_pass.cc index 5bdbb51f..d7f6515f 100644 --- a/mindspore-lite/tools/optimizer/graph/unused_transpose_node_remove_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/unused_transpose_node_remove_pass.cc @@ -28,7 +28,7 @@ #include "mindspore/ops/op_def/auto_generate/gen_ops_primitive_t.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { constexpr size_t kTransposeInput = 1; constexpr size_t kTransposeInputNum = 3; const std::vector kPermNCHW{0, 3, 1, 2}; @@ -119,4 +119,4 @@ bool RemoveUnusedTransposeOpPass::Run(const FuncGraphPtr &func_graph) { } return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/unused_transpose_node_remove_pass.h b/mindspore-lite/tools/optimizer/graph/unused_transpose_node_remove_pass.h index e5e95fa1..7d6b547a 100644 --- a/mindspore-lite/tools/optimizer/graph/unused_transpose_node_remove_pass.h +++ b/mindspore-lite/tools/optimizer/graph/unused_transpose_node_remove_pass.h @@ -21,7 +21,7 @@ #include "include/registry/converter_context.h" using mindspore::converter::FmkType; -namespace mindspore::opt { +namespace mindspore::lite::opt { class RemoveUnusedTransposeOpPass : public Pass { public: RemoveUnusedTransposeOpPass() : Pass("remove_unused_cast_pass") {} @@ -32,5 +32,5 @@ class RemoveUnusedTransposeOpPass : public Pass { private: FmkType fmk_type = converter::kFmkTypeTf; }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_UNUSED_TRANSPOSE_NODE_REMOVE_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/graph/update_conv2d_param_pass.cc b/mindspore-lite/tools/optimizer/graph/update_conv2d_param_pass.cc index 0c274ea3..cf28356b 100644 --- a/mindspore-lite/tools/optimizer/graph/update_conv2d_param_pass.cc +++ b/mindspore-lite/tools/optimizer/graph/update_conv2d_param_pass.cc @@ -26,7 +26,7 @@ #include 
"mindspore/ops/op_def/auto_generate/gen_ops_primitive_c.h" #include "mindspore/core/include/ir/graph_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { namespace { void SetConvAttr(const PrimitivePtr &prim, const std::vector &kernel_size, int64_t in_channel, int64_t out_channel) { @@ -121,4 +121,4 @@ bool UpdateConv2DParamPass::Run(const FuncGraphPtr &func_graph) { } return true; } -} // namespace mindspore::opt +} // namespace mindspore::lite::opt diff --git a/mindspore-lite/tools/optimizer/graph/update_conv2d_param_pass.h b/mindspore-lite/tools/optimizer/graph/update_conv2d_param_pass.h index 79b926b9..b6f867eb 100644 --- a/mindspore-lite/tools/optimizer/graph/update_conv2d_param_pass.h +++ b/mindspore-lite/tools/optimizer/graph/update_conv2d_param_pass.h @@ -20,7 +20,7 @@ #include "include/backend/optimizer/pass.h" #include "tools/optimizer/common/gllo_utils.h" -namespace mindspore::opt { +namespace mindspore::lite::opt { class UpdateConv2DParamPass : public Pass { public: UpdateConv2DParamPass() : Pass("UpdateConv2DParamPass") {} @@ -30,5 +30,5 @@ class UpdateConv2DParamPass : public Pass { private: STATUS UpdateConv2DAttr(const CNodePtr &cnode); }; -} // namespace mindspore::opt +} // namespace mindspore::lite::opt #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_UPDATE_CONV2D_PARAM_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/parallel/conv2d_info.cc b/mindspore-lite/tools/optimizer/parallel/conv2d_info.cc index 06daaae8..8af3cd98 100644 --- a/mindspore-lite/tools/optimizer/parallel/conv2d_info.cc +++ b/mindspore-lite/tools/optimizer/parallel/conv2d_info.cc @@ -36,7 +36,7 @@ #include "utils/anf_utils.h" using mindspore::schema::PrimitiveType_Conv2DFusion; -namespace mindspore { +namespace mindspore::lite { namespace opt { constexpr auto kConvWithBias = 4; constexpr auto kAnfConvInput = 1; @@ -412,4 +412,4 @@ int Conv2DInfo::InferReplaceOp() { OPERATOR_INFO_REGISTER(PrimitiveType_Conv2DFusion, kNumberTypeFloat32, false, OperatorInfoCreator) OPERATOR_INFO_REGISTER(PrimitiveType_Conv2DFusion, kNumberTypeInt8, false, OperatorInfoCreator) } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/parallel/conv2d_info.h b/mindspore-lite/tools/optimizer/parallel/conv2d_info.h index 2318b59e..23e90b6d 100644 --- a/mindspore-lite/tools/optimizer/parallel/conv2d_info.h +++ b/mindspore-lite/tools/optimizer/parallel/conv2d_info.h @@ -25,7 +25,7 @@ #include "infer/cxx_api/conv2d_fusion.h" #include "include/errorcode.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class Conv2DInfo : public OperatorInfo { @@ -55,6 +55,6 @@ class Conv2DInfo : public OperatorInfo { size_t dev_index, int cin_sum, int cout_sum); }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_CONV2D_INFO_H_ diff --git a/mindspore-lite/tools/optimizer/parallel/depthwise_conv2d_info.cc b/mindspore-lite/tools/optimizer/parallel/depthwise_conv2d_info.cc index 7cb15ba0..aec18207 100644 --- a/mindspore-lite/tools/optimizer/parallel/depthwise_conv2d_info.cc +++ b/mindspore-lite/tools/optimizer/parallel/depthwise_conv2d_info.cc @@ -39,7 +39,7 @@ using mindspore::schema::PrimitiveType_Conv2DFusion; #include "ir/tensor_new.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { void SplitConstantData(char *in_data, char **out_data, int64_t num_split, int64_t split_dim_size, int64_t element_bytes, @@ -521,4 +521,4 @@ int 
DepthwiseConv2DInfo::InferReplaceOp() { OPERATOR_INFO_REGISTER(PrimitiveType_Conv2DFusion, kNumberTypeFloat32, true, OperatorInfoCreator) OPERATOR_INFO_REGISTER(PrimitiveType_Conv2DFusion, kNumberTypeInt8, true, OperatorInfoCreator) } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/parallel/depthwise_conv2d_info.h b/mindspore-lite/tools/optimizer/parallel/depthwise_conv2d_info.h index 81e6ebec..76816f05 100644 --- a/mindspore-lite/tools/optimizer/parallel/depthwise_conv2d_info.h +++ b/mindspore-lite/tools/optimizer/parallel/depthwise_conv2d_info.h @@ -23,7 +23,7 @@ #include "tools/optimizer/parallel/conv2d_info.h" #include "infer/cxx_api/conv2d_fusion.h" #include "include/errorcode.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class DepthwiseConv2DInfo : public Conv2DInfo { public: @@ -63,6 +63,6 @@ class DepthwiseConv2DInfo : public Conv2DInfo { }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_DEPTHWISE_CONV2D_INFO_H_ diff --git a/mindspore-lite/tools/optimizer/parallel/multi_conv_info.cc b/mindspore-lite/tools/optimizer/parallel/multi_conv_info.cc index d1435844..175dd35c 100644 --- a/mindspore-lite/tools/optimizer/parallel/multi_conv_info.cc +++ b/mindspore-lite/tools/optimizer/parallel/multi_conv_info.cc @@ -25,7 +25,7 @@ #include "ops_utils/op_utils.h" using mindspore::schema::PrimitiveType_Conv2dTransposeFusion; -namespace mindspore { +namespace mindspore::lite { namespace opt { int MultiConvSplit::GenSplitInfo() { split_info_.out_num = static_cast(this->strategy_.dev_num); @@ -302,4 +302,4 @@ AnfNodePtr MultiConvSplitCIN::SplitMultiConv(const AnfNodePtr &node) { return nu AnfNodePtr MultiConvSplitCOUT::SplitMultiConv(const AnfNodePtr &node) { return nullptr; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/parallel/multi_conv_info.h b/mindspore-lite/tools/optimizer/parallel/multi_conv_info.h index aaf8e57c..7f96da33 100644 --- a/mindspore-lite/tools/optimizer/parallel/multi_conv_info.h +++ b/mindspore-lite/tools/optimizer/parallel/multi_conv_info.h @@ -20,7 +20,7 @@ #include "tools/optimizer/parallel/multi_node_split.h" #include "tools/optimizer/fisson/fisson_util.h" #include "infer/cxx_api/conv2d_fusion.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { class MultiConvSplit : public MultiNodeSplit { public: @@ -112,5 +112,5 @@ class MultiConvSplitH final : public MultiConvSplit { }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_MULTI_CONV_INFO_H_ diff --git a/mindspore-lite/tools/optimizer/parallel/multi_node_split.cc b/mindspore-lite/tools/optimizer/parallel/multi_node_split.cc index 3cb6fae1..d44d8885 100644 --- a/mindspore-lite/tools/optimizer/parallel/multi_node_split.cc +++ b/mindspore-lite/tools/optimizer/parallel/multi_node_split.cc @@ -18,7 +18,7 @@ #include "tools/optimizer/parallel/multi_node_split.h" #include "tools/optimizer/parallel/multi_conv_info.h" #include "nnacl_c/op_base.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { int MultiNodeSplitProxy::InitResource() { @@ -58,4 +58,4 @@ AnfNodePtr MultiNodeSplitProxy::DoSplit(const FuncGraphPtr &func_graph, const An } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git 
a/mindspore-lite/tools/optimizer/parallel/multi_node_split.h b/mindspore-lite/tools/optimizer/parallel/multi_node_split.h index f3fab761..5c9a89e9 100644 --- a/mindspore-lite/tools/optimizer/parallel/multi_node_split.h +++ b/mindspore-lite/tools/optimizer/parallel/multi_node_split.h @@ -23,7 +23,7 @@ #include "base/base.h" using mindspore::schema::PrimitiveType; -namespace mindspore { +namespace mindspore::lite { namespace opt { class MultiNodeSplit { public: @@ -57,5 +57,5 @@ class MultiNodeSplitProxy : public MultiNodeSplit { std::shared_ptr multi_node_split_{nullptr}; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_MULTI_NODE_SPLIT_H_ diff --git a/mindspore-lite/tools/optimizer/parallel/operator_info.cc b/mindspore-lite/tools/optimizer/parallel/operator_info.cc index 8075b7ce..6522e921 100644 --- a/mindspore-lite/tools/optimizer/parallel/operator_info.cc +++ b/mindspore-lite/tools/optimizer/parallel/operator_info.cc @@ -26,7 +26,7 @@ #include "ops_utils/op_utils.h" #include "src/common/log_util.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { bool is_any_none(const std::vector &split) { return std::any_of(split.begin(), split.end(), [](int64_t v) { return v == static_cast(NoSplit); }); @@ -224,4 +224,4 @@ int OperatorInfo::DoSplit() { return lite::RET_OK; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/parallel/operator_info.h b/mindspore-lite/tools/optimizer/parallel/operator_info.h index cdb33fcd..77e823b4 100644 --- a/mindspore-lite/tools/optimizer/parallel/operator_info.h +++ b/mindspore-lite/tools/optimizer/parallel/operator_info.h @@ -28,7 +28,7 @@ #include "schema/model_generated.h" #include "include/errorcode.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { /** * Do following steps to make a operator support parallel: @@ -103,6 +103,6 @@ bool is_any_none(const std::vector &split); bool is_any_not_none(const std::vector &split); } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_OPERATOR_INFO_H_ diff --git a/mindspore-lite/tools/optimizer/parallel/operator_info_register.cc b/mindspore-lite/tools/optimizer/parallel/operator_info_register.cc index d99d46a1..0a61950f 100644 --- a/mindspore-lite/tools/optimizer/parallel/operator_info_register.cc +++ b/mindspore-lite/tools/optimizer/parallel/operator_info_register.cc @@ -16,7 +16,7 @@ #include "tools/optimizer/parallel/operator_info_register.h" #include -namespace mindspore { +namespace mindspore::lite { namespace opt { // find the only key of operator_info @@ -62,4 +62,4 @@ OperatorInfoRegister::OperatorInfoRegister(schema::PrimitiveType operator_type, OperatorInfoFactory::GeInstance()->RegisterOperatorInfo(operator_type, type_id, is_depth_wise, creator_func); } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/parallel/operator_info_register.h b/mindspore-lite/tools/optimizer/parallel/operator_info_register.h index 01dc0cab..0386c761 100644 --- a/mindspore-lite/tools/optimizer/parallel/operator_info_register.h +++ b/mindspore-lite/tools/optimizer/parallel/operator_info_register.h @@ -23,7 +23,7 @@ #include #include "tools/optimizer/parallel/operator_info.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { using OperatorInfoCreatorFunc = std::function(const std::string 
&name, const SplitStrategy &strategy)>; @@ -85,6 +85,6 @@ class OperatorInfoRegister { static OperatorInfoRegister g_name##operator_type##type_id##is_depth_wise##Creator(operator_type, type_id, \ is_depth_wise, creator_func); } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_OPERATOR_INFO_REGISTER_H_ diff --git a/mindspore-lite/tools/optimizer/parallel/parallel_pass.cc b/mindspore-lite/tools/optimizer/parallel/parallel_pass.cc index 4dd3e50d..c49d7db7 100644 --- a/mindspore-lite/tools/optimizer/parallel/parallel_pass.cc +++ b/mindspore-lite/tools/optimizer/parallel/parallel_pass.cc @@ -23,7 +23,7 @@ #include "nnacl_c/op_base.h" #include "ops_utils/op_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { namespace { @@ -142,4 +142,4 @@ AnfNodePtr ParallelPass::Run(const FuncGraphPtr &func_graph, const AnfNodePtr &n return parallel_operator->replace_op(); } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/parallel/parallel_pass.h b/mindspore-lite/tools/optimizer/parallel/parallel_pass.h index 9fd4bb47..acc7e03c 100644 --- a/mindspore-lite/tools/optimizer/parallel/parallel_pass.h +++ b/mindspore-lite/tools/optimizer/parallel/parallel_pass.h @@ -28,7 +28,7 @@ #ifndef MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_PARALLEL_PASS_H_ #define MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_PARALLEL_PASS_H_ -namespace mindspore { +namespace mindspore::lite { namespace opt { class ParallelPass : public opt::LiteNodePass { public: @@ -56,6 +56,6 @@ class ParallelPass : public opt::LiteNodePass { }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_PARALLEL_PASS_H_ diff --git a/mindspore-lite/tools/optimizer/parallel/split_strategy.cc b/mindspore-lite/tools/optimizer/parallel/split_strategy.cc index e76b036a..92556a01 100644 --- a/mindspore-lite/tools/optimizer/parallel/split_strategy.cc +++ b/mindspore-lite/tools/optimizer/parallel/split_strategy.cc @@ -22,7 +22,7 @@ #include "nnacl_c/op_base.h" #include "src/common/log_util.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { int64_t ApproximateFLOPs(const std::vector &strides, const std::vector &input_shape, @@ -85,4 +85,4 @@ std::unordered_map ParserSplitStrategy(const st return split_strategys; } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/parallel/split_strategy.h b/mindspore-lite/tools/optimizer/parallel/split_strategy.h index 745e0882..a02a3e77 100644 --- a/mindspore-lite/tools/optimizer/parallel/split_strategy.h +++ b/mindspore-lite/tools/optimizer/parallel/split_strategy.h @@ -27,7 +27,7 @@ #include "mindspore/ops/op_def/lite_ops.h" #include "include/lite_types.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { constexpr auto PARALLEL_NAME_SUFFIX = "_parallel"; @@ -109,5 +109,5 @@ std::unordered_map ParserSplitStrategy( SplitMode split_mode); } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_SPLIT_STRATEGY_H_ diff --git a/mindspore-lite/tools/optimizer/parallel/spliter.cc b/mindspore-lite/tools/optimizer/parallel/spliter.cc index 443a9c17..199cdd75 100644 --- a/mindspore-lite/tools/optimizer/parallel/spliter.cc +++ b/mindspore-lite/tools/optimizer/parallel/spliter.cc @@ -21,7 +21,7 @@ #include 
"tools/optimizer/parallel/split_strategy.h" #include "ops_utils/op_utils.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { Spliter *Spliter::GetInstance() { static Spliter spliter; @@ -147,4 +147,4 @@ void Spliter::UpdateNodeOutputShapes(const std::string &node_name, const std::ve } } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite diff --git a/mindspore-lite/tools/optimizer/parallel/spliter.h b/mindspore-lite/tools/optimizer/parallel/spliter.h index 985c6248..6f703c88 100644 --- a/mindspore-lite/tools/optimizer/parallel/spliter.h +++ b/mindspore-lite/tools/optimizer/parallel/spliter.h @@ -24,7 +24,7 @@ #include "include/common/utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" #include "include/lite_types.h" -namespace mindspore { +namespace mindspore::lite { namespace opt { struct IntCompare { bool operator()(const int &lhs, const int &rhs) const { return lhs > rhs; } @@ -82,5 +82,5 @@ class Spliter { std::set match_numbers_; }; } // namespace opt -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_PARALLEL_SPLITER_H_ -- Gitee From ad69cbf3eb706b0c24b45142c0d6954488bc86a7 Mon Sep 17 00:00:00 2001 From: jjfeing Date: Tue, 23 Sep 2025 19:07:26 +0800 Subject: [PATCH 2/2] fix rebase --- .../cxx_api/graph/acl/acl_convert_init_adapter.cc | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/acl/acl_convert_init_adapter.cc b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/acl/acl_convert_init_adapter.cc index ec3f9b6d..75732641 100644 --- a/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/acl/acl_convert_init_adapter.cc +++ b/mindspore-lite/tools/converter/adapter/acl/cxx_api_lite/cxx_api/graph/acl/acl_convert_init_adapter.cc @@ -65,7 +65,6 @@ ge::graphStatus AclConvertInitAdapter::AclBuildInit(const std::map lock(build_flag_mutex_); @@ -74,7 +73,4 @@ void AclConvertInitAdapter::AclBuildFinalize() { ge::aclgrphBuildFinalize(); } } -} // namespace mindspore -======= } // namespace mindspore::lite ->>>>>>> add nameapce lite -- Gitee