diff --git a/cv/classification/cspdarknet53/pytorch/README.md b/cv/classification/cspdarknet53/pytorch/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5fd274239c8c66ddb3675869f9848d0c88f80745
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/README.md
@@ -0,0 +1,38 @@
+# CspDarknet53
+
+## Model description
+
+CSPDarknet53 applies the Cross Stage Partial (CSP) design to the Darknet-53 backbone and is best known as the backbone of YOLOv4. This directory contains a PyTorch implementation for ImageNet-style classification.
+
+## Step 1: Installing
+
+```bash
+pip3 install torch
+pip3 install torchvision
+```
+
+## Step 2: Training
+
+### On a single GPU
+
+```bash
+export CUDA_VISIBLE_DEVICES=0
+python3 train.py --batch-size 64 --epochs 120 --data-path /home/datasets/cv/imagenet
+```
+
+### 8 GPUs on one machine
+
+```bash
+export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+python3 -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --batch-size 64 --epochs 120 --data-path /home/datasets/cv/imagenet
+```
+
+## Result
+
+| GPU     | FP32                  |
+|---------|-----------------------|
+| 8 cards | Acc@1 76.644, fps 830 |
+| 8 cards | fps 148               |
+
+## Reference
+
+https://github.com/WongKinYiu/CrossStagePartialNetworks
diff --git a/cv/classification/cspdarknet53/pytorch/__init__.py b/cv/classification/cspdarknet53/pytorch/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..011573976ed1c1e69c9282a42400acc3e68c3586
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from .utils_ import *
+from .common_utils import *
+from .dataloader import *
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/cv/classification/cspdarknet53/pytorch/common_utils/__init__.py b/cv/classification/cspdarknet53/pytorch/common_utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d2e011f7cec1af806b4635fcf65849b55f1111b
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/common_utils/__init__.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
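+
+# Hypothetical usage sketch (comment added for documentation; not in the
+# original sources): seed every RNG once at startup, e.g.
+#
+#   from common_utils import manual_seed
+#   manual_seed(42, deterministic=True)  # reproducible, at some cuDNN speed cost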
+
+import os
+import random
+
+import numpy as np
+import torch
+
+from .dist import *
+from .metric_logger import *
+from .misc import *
+from .smooth_value import *
+
+def manual_seed(seed, deterministic=False):
+    random.seed(seed)
+    np.random.seed(seed)
+    os.environ['PYTHONHASHSEED'] = str(seed)
+    torch.manual_seed(seed)
+    torch.cuda.manual_seed(seed)
+    torch.cuda.manual_seed_all(seed)
+
+    if deterministic:
+        torch.backends.cudnn.deterministic = True
+        torch.backends.cudnn.benchmark = False
+    else:
+        torch.backends.cudnn.deterministic = False
+        torch.backends.cudnn.benchmark = True
diff --git a/cv/classification/cspdarknet53/pytorch/common_utils/dist.py b/cv/classification/cspdarknet53/pytorch/common_utils/dist.py
new file mode 100644
index 0000000000000000000000000000000000000000..767b6ce0008b75447ed75016f7e973ab086b992e
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/common_utils/dist.py
@@ -0,0 +1,156 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import torch
+import torch.distributed as dist
+
+
+def setup_for_distributed(is_master):
+    """
+    This function disables printing when not in master process
+    """
+    import builtins as __builtin__
+    builtin_print = __builtin__.print
+
+    def print(*args, **kwargs):
+        force = kwargs.pop('force', False)
+        if is_master or force:
+            builtin_print(*args, **kwargs)
+
+    __builtin__.print = print
+
+
+def is_dist_avail_and_initialized():
+    if not dist.is_available():
+        return False
+    if not dist.is_initialized():
+        return False
+    return True
+
+
+def get_world_size():
+    if not is_dist_avail_and_initialized():
+        return 1
+    return dist.get_world_size()
+
+
+def get_rank():
+    if not is_dist_avail_and_initialized():
+        return 0
+    return dist.get_rank()
+
+
+def is_main_process():
+    return get_rank() == 0
+
+
+def save_on_master(*args, **kwargs):
+    if is_main_process():
+        torch.save(*args, **kwargs)
+
+
+def get_dist_backend(args=None):
+    DIST_BACKEND_ENV = "PT_DIST_BACKEND"
+    if DIST_BACKEND_ENV in os.environ:
+        return os.environ[DIST_BACKEND_ENV]
+
+    if args is None:
+        args = dict()
+
+    backend_attr_name = "dist_backend"
+
+    if hasattr(args, backend_attr_name):
+        return getattr(args, backend_attr_name)
+
+    if backend_attr_name in args:
+        return args[backend_attr_name]
+
+    return "nccl"
+
+
+def init_distributed_mode(args):
+    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
+        args.rank = int(os.environ["RANK"])
+        args.world_size = int(os.environ['WORLD_SIZE'])
+        args.gpu = int(os.environ['LOCAL_RANK'])
+    elif 'SLURM_PROCID' in os.environ:
+        args.rank = int(os.environ['SLURM_PROCID'])
+        args.gpu = args.rank % torch.cuda.device_count()
+    else:
+        print('Not using distributed mode')
+        args.distributed = False
+        return
+
+    args.distributed = True
+
+    torch.cuda.set_device(args.gpu)
+    dist_backend = get_dist_backend(args)
+    print('| distributed init (rank {}): {}'.format(
+        args.rank, args.dist_url), flush=True)
+    torch.distributed.init_process_group(backend=dist_backend, init_method=args.dist_url,
+                                         world_size=args.world_size, rank=args.rank)
+    torch.distributed.barrier()
+    setup_for_distributed(args.rank == 0)
+
+
+def all_gather(data):
+    """
+    Run all_gather on arbitrary picklable data (not necessarily tensors)
+    Args:
+        data: any picklable object
+    Returns:
+        list[data]: list of data gathered from each rank
+    """
+    world_size = get_world_size()
+    if world_size == 1:
+        return [data]
+    data_list = [None] * world_size
+    dist.all_gather_object(data_list, data)
+    return data_list
+
+
+def reduce_dict(input_dict, average=True):
+    """
+    Args:
+        input_dict (dict): all the values will be reduced
+        average (bool): whether to do average or sum
+    Reduce the values in the dictionary from all processes so that all processes
+    have the averaged results. Returns a dict with the same fields as
+    input_dict, after reduction.
+    """
+    world_size = get_world_size()
+    if world_size < 2:
+        return input_dict
+    with torch.no_grad():
+        names = []
+        values = []
+        # sort the keys so that they are consistent across processes
+        for k in sorted(input_dict.keys()):
+            names.append(k)
+            values.append(input_dict[k])
+        values = torch.stack(values, dim=0)
+        dist.all_reduce(values)
+        if average:
+            values /= world_size
+        reduced_dict = {k: v for k, v in zip(names, values)}
+    return reduced_dict
diff --git a/cv/classification/cspdarknet53/pytorch/common_utils/metric_logger.py b/cv/classification/cspdarknet53/pytorch/common_utils/metric_logger.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab9c61b0ba26212fb9404268bfd6f681a35f66c7
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/common_utils/metric_logger.py
@@ -0,0 +1,106 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from collections import defaultdict
+import datetime
+import time
+
+import torch
+from .smooth_value import SmoothedValue
+
+"""
+Examples:
+
+>>> metric_logger = MetricLogger(delimiter="  ")
+>>> # For an iterable dataloader
+>>> metric_logger.add_meter('img/s', SmoothedValue(window_size=10, fmt='{value}'))
+>>> header = 'Epoch: [{}]'.format(epoch)
+>>> for image, target in metric_logger.log_every(data_loader, print_freq, header):
+...     ...
+...     metric_logger.meters['img/s'].update(fps)
+"""
+
+class MetricLogger(object):
+
+    def __init__(self, delimiter="\t"):
+        self.meters = defaultdict(SmoothedValue)
+        self.delimiter = delimiter
+
+    def update(self, **kwargs):
+        for k, v in kwargs.items():
+            if isinstance(v, torch.Tensor):
+                v = v.item()
+            assert isinstance(v, (float, int))
+            self.meters[k].update(v)
+
+    def __getattr__(self, attr):
+        if attr in self.meters:
+            return self.meters[attr]
+        if attr in self.__dict__:
+            return self.__dict__[attr]
+        raise AttributeError("'{}' object has no attribute '{}'".format(
+            type(self).__name__, attr))
+
+    def __str__(self):
+        loss_str = []
+        for name, meter in self.meters.items():
+            loss_str.append(
+                "{}: {}".format(name, str(meter))
+            )
+        return self.delimiter.join(loss_str)
+
+    def synchronize_between_processes(self):
+        for meter in self.meters.values():
+            meter.synchronize_between_processes()
+
+    def add_meter(self, name, meter):
+        self.meters[name] = meter
+
+    def log_every(self, iterable, print_freq, header=None):
+        i = 0
+        if not header:
+            header = ''
+        start_time = time.time()
+        end = time.time()
+        iter_time = SmoothedValue(fmt='{avg:.4f}')
+        data_time = SmoothedValue(fmt='{avg:.4f}')
+        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
+        log_msg = self.delimiter.join([
+            header,
+            '[{0' + space_fmt + '}/{1}]',
+            'eta: {eta}',
+            '{meters}',
+            'time: {time}',
+            'data: {data}'
+        ])
+        for obj in iterable:
+            data_time.update(time.time() - end)
+            yield obj
+            iter_time.update(time.time() - end)
+            if i % print_freq == 0:
+                eta_seconds = iter_time.global_avg * (len(iterable) - i)
+                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
+                print(log_msg.format(
+                    i, len(iterable), eta=eta_string,
+                    meters=str(self),
+                    time=str(iter_time), data=str(data_time)))
+            i += 1
+            end = time.time()
+        total_time = time.time() - start_time
+        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
+        print('{} Total time: {}'.format(header, total_time_str))
diff --git a/cv/classification/cspdarknet53/pytorch/common_utils/misc.py b/cv/classification/cspdarknet53/pytorch/common_utils/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c5e49bcd21622b00e41551257ced4473c97b450
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/common_utils/misc.py
@@ -0,0 +1,26 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
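+
+# Note: on Python >= 3.2 the mkdir() helper below is equivalent to
+#   os.makedirs(path, exist_ok=True)
+# it is spelled out so that only EEXIST is swallowed and every other
+# OSError still propagates.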
+
+import os
+import errno
+
+
+def mkdir(path):
+    try:
+        os.makedirs(path)
+    except OSError as e:
+        if e.errno != errno.EEXIST:
+            raise
diff --git a/cv/classification/cspdarknet53/pytorch/common_utils/smooth_value.py b/cv/classification/cspdarknet53/pytorch/common_utils/smooth_value.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c1fadae25ec148a0af2cc55a544e098d6393247
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/common_utils/smooth_value.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from collections import deque
+
+import torch
+import torch.distributed as dist
+from .dist import is_dist_avail_and_initialized
+
+
+class SmoothedValue(object):
+    """Track a series of values and provide access to smoothed values over a
+    window or the global series average.
+    """
+
+    def __init__(self, window_size=20, fmt=None):
+        if fmt is None:
+            fmt = "{median:.4f} ({global_avg:.4f})"
+        self.deque = deque(maxlen=window_size)
+        self.total = 0.0
+        self.count = 0
+        self.fmt = fmt
+
+    def update(self, value, n=1):
+        self.deque.append(value)
+        self.count += n
+        self.total += value * n
+
+    def synchronize_between_processes(self):
+        """
+        Warning: does not synchronize the deque!
+        """
+        if not is_dist_avail_and_initialized():
+            return
+        t = torch.tensor([self.count, self.total], dtype=torch.float32, device='cuda')
+        dist.barrier()
+        dist.all_reduce(t)
+        t = t.tolist()
+        self.count = int(t[0])
+        self.total = t[1]
+
+    @property
+    def median(self):
+        d = torch.tensor(list(self.deque))
+        return d.median().item()
+
+    @property
+    def avg(self):
+        d = torch.tensor(list(self.deque), dtype=torch.float32)
+        return d.mean().item()
+
+    @property
+    def global_avg(self):
+        return self.total / self.count
+
+    @property
+    def max(self):
+        return max(self.deque)
+
+    @property
+    def value(self):
+        return self.deque[-1]
+
+    def __str__(self):
+        return self.fmt.format(
+            median=self.median,
+            avg=self.avg,
+            global_avg=self.global_avg,
+            max=self.max,
+            value=self.value)
diff --git a/cv/classification/cspdarknet53/pytorch/dataloader/__init__.py b/cv/classification/cspdarknet53/pytorch/dataloader/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..66b217b8507dfb048e3a366fc0fe6374f24eedc4
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/dataloader/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
\ No newline at end of file
diff --git a/cv/classification/cspdarknet53/pytorch/dataloader/classification.py b/cv/classification/cspdarknet53/pytorch/dataloader/classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..317a9874e0c561de93b0eae48f3e664422447974
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/dataloader/classification.py
@@ -0,0 +1,120 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import os
+import time
+
+import torch
+import torchvision
+from .utils import presets_classification as presets
+
+"""
+Examples:
+
+>>> dataset, dataset_test, train_sampler, test_sampler = load_data(train_dir, val_dir, args)
+"""
+
+
+def get_datasets(traindir,
+                 valdir,
+                 resize_size=256,
+                 crop_size=224,
+                 auto_augment_policy=None,
+                 random_erase_prob=0.):
+    # Data loading code
+    print("Loading data")
+    print("Loading training data")
+    dataset = torchvision.datasets.ImageFolder(
+        traindir,
+        presets.ClassificationPresetTrain(crop_size=crop_size, auto_augment_policy=auto_augment_policy,
+                                          random_erase_prob=random_erase_prob))
+
+    print("Loading validation data")
+    dataset_test = torchvision.datasets.ImageFolder(
+        valdir,
+        presets.ClassificationPresetEval(crop_size=crop_size, resize_size=resize_size))
+
+    return dataset, dataset_test
+
+
+def get_input_size(model):
+    bigger_input_size_models = ['inception']
+    resize_size = 256
+    crop_size = 224
+    for bi_model in bigger_input_size_models:
+        if bi_model in model:
+            resize_size = 342
+            crop_size = 299
+
+    return resize_size, crop_size
+
+
+def load_data(train_dir, val_dir, args):
+    auto_augment_policy = getattr(args, "auto_augment", None)
+    random_erase_prob = getattr(args, "random_erase", 0.0)
+    resize_size, crop_size = get_input_size(args.model)
+    dataset, dataset_test = get_datasets(train_dir, val_dir,
+                                         auto_augment_policy=auto_augment_policy,
+                                         random_erase_prob=random_erase_prob,
+                                         resize_size=resize_size,
+                                         crop_size=crop_size)
+    if args.distributed:
+        train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
+        test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test)
+    else:
+        train_sampler = torch.utils.data.RandomSampler(dataset)
+        test_sampler = torch.utils.data.SequentialSampler(dataset_test)
+
+    return dataset, dataset_test, train_sampler, test_sampler
+
+
+def _create_torch_dataloader(train_dir, val_dir, args):
+    dataset, dataset_test, train_sampler, test_sampler = load_data(train_dir, val_dir, args)
+
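+    # Comment added for clarity: load_data() above also builds the samplers.
+    # DistributedSampler shards batches across ranks under DDP, while the
+    # single-process path uses Random/Sequential sampling.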
+    data_loader = torch.utils.data.DataLoader(
+        dataset, batch_size=args.batch_size,
+        sampler=train_sampler, num_workers=args.workers, pin_memory=True)
+
+    data_loader_test = torch.utils.data.DataLoader(
+        dataset_test, batch_size=args.batch_size,
+        sampler=test_sampler, num_workers=args.workers, pin_memory=True)
+
+    return data_loader, data_loader_test
+
+
+def _create_dali_dataloader(train_dir, val_dir, args):
+    from .dali_classification import get_imagenet_iter_dali
+    device = torch.cuda.current_device()
+    _, crop_size = get_input_size(args.model)
+    data_loader = get_imagenet_iter_dali('train', train_dir, args.batch_size,
+                                         num_threads=args.workers,
+                                         device_id=device,
+                                         size=crop_size)
+    data_loader_test = get_imagenet_iter_dali('val', val_dir, args.batch_size,
+                                              num_threads=args.workers,
+                                              device_id=device,
+                                              size=crop_size)
+
+    return data_loader, data_loader_test
+
+
+def create_dataloader(train_dir, val_dir, args):
+    print("Creating data loaders")
+    if args.dali:
+        train_dir = os.path.dirname(train_dir)
+        val_dir = os.path.dirname(val_dir)
+        return _create_dali_dataloader(train_dir, val_dir, args)
+    return _create_torch_dataloader(train_dir, val_dir, args)
diff --git a/cv/classification/cspdarknet53/pytorch/dataloader/dali_classification.py b/cv/classification/cspdarknet53/pytorch/dataloader/dali_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..2918bef662408deb14f4ca3b500a64b65bdcc28d
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/dataloader/dali_classification.py
@@ -0,0 +1,129 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
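+
+# These pipelines use the legacy nvidia.dali.ops / Pipeline-subclass API
+# (pre-1.0 style). Newer DALI releases expose the same operators through the
+# functional nvidia.dali.fn module, so this file may require an older DALI
+# version to run as written.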
+
+
+import argparse
+import os
+import sys
+import time
+
+import nvidia.dali.ops as ops
+import nvidia.dali.types as types
+from nvidia.dali.pipeline import Pipeline
+from nvidia.dali.plugin.pytorch import DALIClassificationIterator
+
+class HybridTrainPipe(Pipeline):
+    def __init__(self, batch_size, num_threads, device_id, data_dir, size):
+        super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id)
+        self.input = ops.FileReader(file_root=data_dir, random_shuffle=True)
+        self.decode = ops.ImageDecoder(device="cpu", output_type=types.RGB)
+        self.res = ops.RandomResizedCrop(device="gpu", size=size, random_area=[0.08, 1.25])
+        self.cmnp = ops.CropMirrorNormalize(device="gpu",
+                                            output_dtype=types.FLOAT,
+                                            output_layout=types.NCHW,
+                                            image_type=types.RGB,
+                                            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
+                                            std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
+
+    def define_graph(self):
+        self.jpegs, self.labels = self.input(name="Reader")
+
+        images = self.decode(self.jpegs)
+        images = self.res(images.gpu())
+        output = self.cmnp(images)
+        return [output, self.labels]
+
+
+class HybridValPipe(Pipeline):
+    def __init__(self, batch_size, num_threads, device_id, data_dir, size):
+        super(HybridValPipe, self).__init__(batch_size, num_threads, device_id)
+        self.input = ops.FileReader(file_root=data_dir, random_shuffle=False)
+        self.decode = ops.ImageDecoder(device="cpu", output_type=types.RGB)
+        self.res = ops.Resize(device="gpu", resize_x=size, resize_y=size)
+        self.cmnp = ops.CropMirrorNormalize(device="gpu",
+                                            output_dtype=types.FLOAT,
+                                            output_layout=types.NCHW,
+                                            crop=(size, size),
+                                            image_type=types.RGB,
+                                            mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
+                                            std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
+
+    def define_graph(self):
+        self.jpegs, self.labels = self.input(name="Reader")
+
+        images = self.decode(self.jpegs)
+        images = self.res(images.gpu())
+        output = self.cmnp(images)
+        return [output, self.labels]
+
+
+def get_imagenet_iter_dali(type, image_dir, batch_size, num_threads, device_id, size):
+    if type == 'train':
+        pip_train = HybridTrainPipe(batch_size=batch_size, num_threads=num_threads, device_id=device_id,
+                                    data_dir=os.path.join(image_dir, "train"),
+                                    size=size)
+        pip_train.build()
+        dali_iter_train = DALIClassificationIterator(pip_train, size=pip_train.epoch_size("Reader"))
+        return dali_iter_train
+    elif type == 'val':
+        pip_val = HybridValPipe(batch_size=batch_size, num_threads=num_threads, device_id=device_id,
+                                data_dir=os.path.join(image_dir, "val"),
+                                size=size)
+        pip_val.build()
+        dali_iter_val = DALIClassificationIterator(pip_val, size=pip_val.epoch_size("Reader"))
+        return dali_iter_val
+
+
+def main(arguments):
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--data_dir', help='directory to save data to', type=str, default='classification data')
+    args = parser.parse_args(arguments)
+
+    train_loader = get_imagenet_iter_dali(type='train', image_dir=args.data_dir,
+                                          batch_size=256,
+                                          num_threads=4, size=224, device_id=3)
+
+    val_loader = get_imagenet_iter_dali(type="val", image_dir=args.data_dir,
+                                        batch_size=256,
+                                        num_threads=4, size=224, device_id=3)
+
+    print('start dali train dataloader.')
+    start = time.time()
+    for epoch in range(20):
+        for i, data in enumerate(train_loader):
+            images = data[0]["data"].cuda(non_blocking=True)
+            labels = data[0]["label"].squeeze().long().cuda(non_blocking=True)
+
+        # WARN: Very important. DALI iterators must be reset before the next epoch.
+        train_loader.reset()
+        print("Epoch", epoch)
+    print('dali iterate time: %fs' % (time.time() - start))
+    print('end dali train dataloader.')
+
+    print('start dali val dataloader.')
+    start = time.time()
+    for i, data in enumerate(val_loader):
+        images = data[0]["data"].cuda(non_blocking=True)
+        print(images.shape)
+        labels = data[0]["label"].squeeze().long().cuda(non_blocking=True)
+        print(labels.shape)
+    print('dali iterate time: %fs' % (time.time() - start))
+    print('end dali val dataloader.')
+
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
diff --git a/cv/classification/cspdarknet53/pytorch/dataloader/utils/__init__.py b/cv/classification/cspdarknet53/pytorch/dataloader/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..66b217b8507dfb048e3a366fc0fe6374f24eedc4
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/dataloader/utils/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
\ No newline at end of file
diff --git a/cv/classification/cspdarknet53/pytorch/dataloader/utils/presets_classification.py b/cv/classification/cspdarknet53/pytorch/dataloader/utils/presets_classification.py
new file mode 100644
index 0000000000000000000000000000000000000000..59688a9595b8c1d2d8b2a9f23c228da63b047a06
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/dataloader/utils/presets_classification.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
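+
+# Usage sketch (hypothetical variable names, added for documentation): the
+# presets are plain callables over PIL images, e.g.
+#
+#   train_tf = ClassificationPresetTrain(crop_size=224)
+#   x = train_tf(pil_image)  # normalized float tensor of shape [3, 224, 224]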
+
+
+from torchvision.transforms import autoaugment, transforms
+
+
+class ClassificationPresetTrain:
+    def __init__(self, crop_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), hflip_prob=0.5,
+                 auto_augment_policy=None, random_erase_prob=0.0):
+        trans = [transforms.RandomResizedCrop(crop_size)]
+        if hflip_prob > 0:
+            trans.append(transforms.RandomHorizontalFlip(hflip_prob))
+        if auto_augment_policy is not None:
+            aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy)
+            trans.append(autoaugment.AutoAugment(policy=aa_policy))
+        trans.extend([
+            transforms.ToTensor(),
+            transforms.Normalize(mean=mean, std=std),
+        ])
+        if random_erase_prob > 0:
+            trans.append(transforms.RandomErasing(p=random_erase_prob))
+
+        self.transforms = transforms.Compose(trans)
+
+    def __call__(self, img):
+        return self.transforms(img)
+
+
+class ClassificationPresetEval:
+    def __init__(self, crop_size, resize_size=256, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
+
+        self.transforms = transforms.Compose([
+            transforms.Resize(resize_size),
+            transforms.CenterCrop(crop_size),
+            transforms.ToTensor(),
+            transforms.Normalize(mean=mean, std=std),
+        ])
+
+    def __call__(self, img):
+        return self.transforms(img)
diff --git a/cv/classification/cspdarknet53/pytorch/model/csdarknet53.py b/cv/classification/cspdarknet53/pytorch/model/csdarknet53.py
new file mode 100644
index 0000000000000000000000000000000000000000..74bd36cc1bf7cf5f83ccc45af333c398e6eb8e6c
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/model/csdarknet53.py
@@ -0,0 +1,67 @@
+import torch
+import torch.nn as nn
+import torch.backends.cudnn as cudnn
+from torchsummary import summary
+
+from .cslayers import *
+
+__all__ = ['CsDarkNet53']
+
+class CsDarkNet53(nn.Module):
+    def __init__(self, num_classes):
+        super(CsDarkNet53, self).__init__()
+
+        input_channels = 32
+
+        # Network
+        self.stage1 = Conv2dBatchLeaky(3, input_channels, 3, 1, activation='mish')
+        self.stage2 = Stage2(input_channels)
+        self.stage3 = Stage3(4*input_channels)
+        self.stage4 = Stage(4*input_channels, 8)
+        self.stage5 = Stage(8*input_channels, 8)
+        self.stage6 = Stage(16*input_channels, 4)
+
+        self.conv = Conv2dBatchLeaky(32*input_channels, 32*input_channels, 1, 1, activation='mish')
+        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+        self.fc = nn.Linear(1024, num_classes)
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+
+    def forward(self, x):
+        stage1 = self.stage1(x)
+        stage2 = self.stage2(stage1)
+        stage3 = self.stage3(stage2)
+        stage4 = self.stage4(stage3)
+        stage5 = self.stage5(stage4)
+        stage6 = self.stage6(stage5)
+
+        conv = self.conv(stage6)
+        x = self.avgpool(conv)
+        x = x.view(-1, 1024)
+        x = self.fc(x)
+
+        return x
+
+if __name__ == "__main__":
+    use_cuda = torch.cuda.is_available()
+    if use_cuda:
+        device = torch.device("cuda")
+        cudnn.benchmark = True
+    else:
+        device = torch.device("cpu")
+
+    darknet = CsDarkNet53(num_classes=10)
+    darknet = darknet.to(device)
+    with torch.no_grad():
+        darknet.eval()
+        data = torch.rand(1, 3, 256, 256)
+        data = data.to(device)
+        try:
+            #print(darknet)
+            summary(darknet, (3, 256, 256))
+            print(darknet(data))
+        except Exception as e:
+            print(e)
diff --git a/cv/classification/cspdarknet53/pytorch/model/cslayers.py b/cv/classification/cspdarknet53/pytorch/model/cslayers.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f5f7c78a05f6a9511b0e4beb40518b16941d8c9
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/model/cslayers.py
@@ -0,0 +1,176 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+# mish(x) = x * tanh(log(1 + e^x))
+class Mish(nn.Module):
+    def __init__(self):
+        super(Mish, self).__init__()
+
+    def forward(self, x):
+        return x * torch.tanh(F.softplus(x))
+
+class Conv2dBatchLeaky(nn.Module):
+    """
+    This convenience layer groups a 2D convolution, a batchnorm and an
+    activation (leaky ReLU, Mish, or none for 'linear').
+    """
+    def __init__(self, in_channels, out_channels, kernel_size, stride, activation='leaky', leaky_slope=0.1):
+        super(Conv2dBatchLeaky, self).__init__()
+
+        # Parameters
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.kernel_size = kernel_size
+        self.stride = stride
+        if isinstance(kernel_size, (list, tuple)):
+            self.padding = [int(k/2) for k in kernel_size]
+        else:
+            self.padding = int(kernel_size/2)
+        self.leaky_slope = leaky_slope
+        # self.mish = Mish()
+
+        # Layer
+        if activation == "leaky":
+            self.layers = nn.Sequential(
+                nn.Conv2d(self.in_channels, self.out_channels, self.kernel_size, self.stride, self.padding, bias=False),
+                nn.BatchNorm2d(self.out_channels),
+                nn.LeakyReLU(self.leaky_slope, inplace=True)
+            )
+        elif activation == "mish":
+            self.layers = nn.Sequential(
+                nn.Conv2d(self.in_channels, self.out_channels, self.kernel_size, self.stride, self.padding, bias=False),
+                nn.BatchNorm2d(self.out_channels),
+                Mish()
+            )
+        elif activation == "linear":
+            self.layers = nn.Sequential(
+                nn.Conv2d(self.in_channels, self.out_channels, self.kernel_size, self.stride, self.padding, bias=False)
+            )
+        else:
+            raise ValueError("Unknown activation: {}".format(activation))
+
+    def __repr__(self):
+        s = '{name} ({in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}, padding={padding}, negative_slope={leaky_slope})'
+        return s.format(name=self.__class__.__name__, **self.__dict__)
+
+    def forward(self, x):
+        x = self.layers(x)
+        return x
+
+class SmallBlock(nn.Module):
+
+    def __init__(self, nchannels):
+        super().__init__()
+        self.features = nn.Sequential(
+            Conv2dBatchLeaky(nchannels, nchannels, 1, 1, activation='mish'),
+            Conv2dBatchLeaky(nchannels, nchannels, 3, 1, activation='mish')
+        )
+        # conv_shortcut
+        # Reference: https://github.com/bubbliiiing/yolov4-pytorch
+        # (no conv layer follows the shortcut)
+        # self.active_linear = Conv2dBatchLeaky(nchannels, nchannels, 1, 1, activation='linear')
+        # self.conv_shortcut = Conv2dBatchLeaky(nchannels, nchannels, 1, 1, activation='mish')
+
+    def forward(self, data):
+        short_cut = data + self.features(data)
+        # active_linear = self.conv_shortcut(short_cut)
+
+        return short_cut
+
+# Stage1 conv [256,256,3]->[256,256,32]
+
+class Stage2(nn.Module):
+
+    def __init__(self, nchannels):
+        super().__init__()
+        # stage2 32
+        self.conv1 = Conv2dBatchLeaky(nchannels, 2*nchannels, 3, 2, activation='mish')
+        self.split0 = Conv2dBatchLeaky(2*nchannels, 2*nchannels, 1, 1, activation='mish')
+        self.split1 = Conv2dBatchLeaky(2*nchannels, 2*nchannels, 1, 1, activation='mish')
+
+        self.conv2 = Conv2dBatchLeaky(2*nchannels, nchannels, 1, 1, activation='mish')
+        self.conv3 = Conv2dBatchLeaky(nchannels, 2*nchannels, 3, 1, activation='mish')
+
+        self.conv4 = Conv2dBatchLeaky(2*nchannels, 2*nchannels, 1, 1, activation='mish')
+
+    def forward(self, data):
+        conv1 = self.conv1(data)
+        split0 = self.split0(conv1)
+        split1 = self.split1(conv1)
+        conv2 = self.conv2(split1)
+        conv3 = self.conv3(conv2)
+
+        shortcut = split1 + conv3
+        conv4 = self.conv4(shortcut)
+
+        route = torch.cat([split0, conv4], dim=1)
+        return route
+
+class Stage3(nn.Module):
+    def __init__(self, nchannels):
+        super().__init__()
+        # stage3 128
+        self.conv1 = Conv2dBatchLeaky(nchannels, nchannels // 2, 1, 1, activation='mish')
+        self.conv2 = Conv2dBatchLeaky(nchannels // 2, nchannels, 3, 2, activation='mish')
+
+        self.split0 = Conv2dBatchLeaky(nchannels, nchannels // 2, 1, 1, activation='mish')
+        self.split1 = Conv2dBatchLeaky(nchannels, nchannels // 2, 1, 1, activation='mish')
+
+        self.block1 = SmallBlock(nchannels // 2)
+        self.block2 = SmallBlock(nchannels // 2)
+
+        self.conv3 = Conv2dBatchLeaky(nchannels // 2, nchannels // 2, 1, 1, activation='mish')
+
+    def forward(self, data):
+        conv1 = self.conv1(data)
+        conv2 = self.conv2(conv1)
+
+        split0 = self.split0(conv2)
+        split1 = self.split1(conv2)
+
+        block1 = self.block1(split1)
+        block2 = self.block2(block1)
+
+        conv3 = self.conv3(block2)
+
+        route = torch.cat([split0, conv3], dim=1)
+
+        return route
+
+# Stage4 Stage5 Stage6
+class Stage(nn.Module):
+    def __init__(self, nchannels, nblocks):
+        super().__init__()
+        # stage4 : 128
+        # stage5 : 256
+        # stage6 : 512
+        self.conv1 = Conv2dBatchLeaky(nchannels, nchannels, 1, 1, activation='mish')
+        self.conv2 = Conv2dBatchLeaky(nchannels, 2*nchannels, 3, 2, activation='mish')
+        self.split0 = Conv2dBatchLeaky(2*nchannels, nchannels, 1, 1, activation='mish')
+        self.split1 = Conv2dBatchLeaky(2*nchannels, nchannels, 1, 1, activation='mish')
+        blocks = []
+        for i in range(nblocks):
+            blocks.append(SmallBlock(nchannels))
+        self.blocks = nn.Sequential(*blocks)
+        self.conv4 = Conv2dBatchLeaky(nchannels, nchannels, 1, 1, activation='mish')
+
+    def forward(self, data):
+        conv1 = self.conv1(data)
+        conv2 = self.conv2(conv1)
+
+        split0 = self.split0(conv2)
+        split1 = self.split1(conv2)
+        blocks = self.blocks(split1)
+        conv4 = self.conv4(blocks)
+        route = torch.cat([split0, conv4], dim=1)
+
+        return route
diff --git a/cv/classification/cspdarknet53/pytorch/train.py b/cv/classification/cspdarknet53/pytorch/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..9afa8dc9d1b45673d223f0b195e4bf36dce18615
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/train.py
@@ -0,0 +1,318 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
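+
+# Entry point for single-GPU and distributed training; launch commands are in
+# the README, e.g.
+#
+#   python3 -m torch.distributed.launch --nproc_per_node=8 --use_env train.py \
+#       --batch-size 64 --epochs 120 --data-path /home/datasets/cv/imagenet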
+
+import datetime
+import os
+import time
+
+import torch
+import torch.utils.data
+
+try:
+    from torch.cuda.amp import autocast, GradScaler
+    scaler = GradScaler()
+except ImportError:
+    autocast = None
+    scaler = None
+
+
+from torch import nn
+import torch.distributed as dist
+import torchvision
+
+from utils_ import (MetricLogger, SmoothedValue, accuracy, mkdir,
+                    init_distributed_mode, manual_seed,
+                    is_main_process, save_on_master, get_world_size)
+
+from dataloader.classification import create_dataloader
+
+from model.csdarknet53 import CsDarkNet53
+
+def compute_loss(model, image, target, criterion):
+    output = model(image)
+    if not isinstance(output, (tuple, list)):
+        output = [output]
+    losses = []
+    for out in output:
+        losses.append(criterion(out, target))
+    loss = sum(losses)
+    return loss, output[0]
+
+
+def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, print_freq, amp=False, use_dali=False):
+    model.train()
+    metric_logger = MetricLogger(delimiter="  ")
+    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value}'))
+    metric_logger.add_meter('img/s', SmoothedValue(window_size=10, fmt='{value}'))
+
+    header = 'Epoch: [{}]'.format(epoch)
+    all_fps = []
+    for data in metric_logger.log_every(data_loader, print_freq, header):
+        if use_dali:
+            image, target = data[0]["data"], data[0]["label"][:, 0].long()
+        else:
+            image, target = data
+        start_time = time.time()
+        image, target = image.to(device), target.to(device)
+        if autocast is None or not amp:
+            loss, output = compute_loss(model, image, target, criterion)
+        else:
+            with autocast():
+                loss, output = compute_loss(model, image, target, criterion)
+
+        optimizer.zero_grad()
+        if scaler is not None and amp:
+            scaler.scale(loss).backward()
+            scaler.step(optimizer)
+            scaler.update()
+        else:
+            loss.backward()
+            optimizer.step()
+
+        torch.cuda.synchronize()
+        end_time = time.time()
+
+        acc1, acc5 = accuracy(output, target, topk=(1, 5))
+        batch_size = image.shape[0]
+        metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
+        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
+        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
+        fps = batch_size / (end_time - start_time) * get_world_size()
+        metric_logger.meters['img/s'].update(fps)
+        all_fps.append(fps)
+
+    print(header, 'Avg img/s:', sum(all_fps) / len(all_fps))
+
+
+def evaluate(model, criterion, data_loader, device, print_freq=100, use_dali=False):
+    model.eval()
+    metric_logger = MetricLogger(delimiter="  ")
+    header = 'Test:'
+    with torch.no_grad():
+        for data in metric_logger.log_every(data_loader, print_freq, header):
+            if use_dali:
+                image, target = data[0]["data"], data[0]["label"][:, 0].long()
+            else:
+                image, target = data
+            image = image.to(device, non_blocking=True)
+            target = target.to(device, non_blocking=True)
+            output = model(image)
+            loss = criterion(output, target)
+
+            acc1, acc5 = accuracy(output, target, topk=(1, 5))
+            # FIXME need to take into account that the datasets
+            # could have been padded in distributed setup
+            batch_size = image.shape[0]
+            metric_logger.update(loss=loss.item())
+            metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
+            metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
+        # gather the stats from all processes
+        metric_logger.synchronize_between_processes()
+
+    print(' * Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f}'
+          .format(top1=metric_logger.acc1, top5=metric_logger.acc5))
+    return metric_logger.acc1.global_avg
+
+
+def _get_cache_path(filepath):
+    import hashlib
+    h = hashlib.sha1(filepath.encode()).hexdigest()
+    cache_path = os.path.join("~", ".torch", "vision", "datasets", "imagefolder", h[:10] + ".pt")
+    cache_path = os.path.expanduser(cache_path)
+    return cache_path
+
+
+def main(args):
+    if args.output_dir:
+        mkdir(args.output_dir)
+
+    init_distributed_mode(args)
+    print(args)
+
+    device = torch.device(args.device)
+
+    manual_seed(args.seed, deterministic=False)
+    # torch.backends.cudnn.benchmark = True
+
+    # Determine the world size so the global batch size can be computed.
+    if dist.is_initialized():
+        num_gpu = dist.get_world_size()
+    else:
+        num_gpu = 1
+
+    global_batch_size = num_gpu * args.batch_size
+
+    train_dir = os.path.join(args.data_path, 'train')
+    val_dir = os.path.join(args.data_path, 'val')
+
+    num_classes = len(os.listdir(train_dir))
+    if 0 < num_classes < 13:
+        if global_batch_size > 512:
+            if is_main_process():
+                print("WARN: Capping global batch size at 512 to avoid non-convergence when training on a small dataset.")
+            args.batch_size = 512 // num_gpu
+
+    if args.pretrained:
+        num_classes = 1000
+
+    data_loader, data_loader_test = create_dataloader(train_dir, val_dir, args)
+
+    print("Creating model")
+    model = CsDarkNet53(num_classes=num_classes)
+    print(model)
+    model.to(device)
+    if args.distributed and args.sync_bn:
+        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
+
+    criterion = nn.CrossEntropyLoss()
+
+    opt_name = args.opt.lower()
+    if opt_name == 'sgd':
+        optimizer = torch.optim.SGD(
+            model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
+    elif opt_name == 'rmsprop':
+        optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr, momentum=args.momentum,
+                                        weight_decay=args.weight_decay, eps=0.0316, alpha=0.9)
+    else:
+        raise RuntimeError("Invalid optimizer {}. Only SGD and RMSprop are supported.".format(args.opt))
+
+    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
+
+    model_without_ddp = model
+    if args.distributed:
+        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
+        model_without_ddp = model.module
+
+    if args.resume:
+        checkpoint = torch.load(args.resume, map_location='cpu')
+        model_without_ddp.load_state_dict(checkpoint['model'])
+        optimizer.load_state_dict(checkpoint['optimizer'])
+        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
+        args.start_epoch = checkpoint['epoch'] + 1
+
+    if args.test_only:
+        evaluate(model, criterion, data_loader_test, device=device, use_dali=args.dali)
+        return
+
+    print("Start training")
+    start_time = time.time()
+    for epoch in range(args.start_epoch, args.epochs):
+        epoch_start_time = time.time()
+        if args.distributed and not args.dali:
+            data_loader.sampler.set_epoch(epoch)
+        train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args.print_freq, args.amp, use_dali=args.dali)
+        lr_scheduler.step()
+        evaluate(model, criterion, data_loader_test, device=device, use_dali=args.dali)
+        if args.output_dir:
+            checkpoint = {
+                'model': model_without_ddp.state_dict(),
+                'optimizer': optimizer.state_dict(),
+                'lr_scheduler': lr_scheduler.state_dict(),
+                'epoch': epoch,
+                'args': args}
+            #save_on_master(
+            #    checkpoint,
+            #    os.path.join(args.output_dir, 'model_{}.pth'.format(epoch)))
+            save_on_master(
+                checkpoint,
+                os.path.join(args.output_dir, 'checkpoint.pth'))
+        epoch_total_time = time.time() - epoch_start_time
+        epoch_total_time_str = str(datetime.timedelta(seconds=int(epoch_total_time)))
+        print('epoch time {}'.format(epoch_total_time_str))
+
+        if args.dali:
+            data_loader.reset()
+            data_loader_test.reset()
+
+    total_time = time.time() - start_time
+    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
+    print('Training time {}'.format(total_time_str))
+
+
+def get_args_parser(add_help=True):
+    import argparse
+    parser = argparse.ArgumentParser(description='PyTorch Classification Training', add_help=add_help)
+
+    parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', help='dataset path')
+    parser.add_argument('--model', default='', help='model name')
+    parser.add_argument('--device', default='cuda', help='device')
+    parser.add_argument('-b', '--batch-size', default=32, type=int)
+    parser.add_argument('--epochs', default=90, type=int, metavar='N',
+                        help='number of total epochs to run')
+    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
+                        help='number of data loading workers (default: 4)')
+    parser.add_argument('--opt', default='sgd', type=str, help='optimizer')
+    parser.add_argument('--lr', default=0.1, type=float, help='initial learning rate')
+    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
+                        help='momentum')
+    parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
+                        metavar='W', help='weight decay (default: 1e-4)',
+                        dest='weight_decay')
+    parser.add_argument('--lr-step-size', default=30, type=int, help='decrease lr every step-size epochs')
+    parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
+    parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
+    parser.add_argument('--output-dir', default='.', help='path where to save')
+    parser.add_argument('--resume', default='', help='resume from checkpoint')
+    parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
+                        help='start epoch')
+    parser.add_argument(
+        "--cache-dataset",
+        dest="cache_dataset",
+        help="Cache the datasets for quicker initialization. It also serializes the transforms",
+        action="store_true",
+    )
+    parser.add_argument(
+        "--sync-bn",
+        dest="sync_bn",
+        help="Use sync batch norm",
+        action="store_true",
+    )
+    parser.add_argument(
+        "--test-only",
+        dest="test_only",
+        help="Only test the model",
+        action="store_true",
+    )
+    parser.add_argument(
+        "--pretrained",
+        dest="pretrained",
+        help="Use pre-trained models from the modelzoo",
+        action="store_true",
+    )
+    parser.add_argument('--auto-augment', default=None, help='auto augment policy (default: None)')
+    parser.add_argument('--random-erase', default=0.0, type=float, help='random erasing probability (default: 0.0)')
+    parser.add_argument(
+        "--dali",
+        help="Use DALI as the dataloader",
+        default=False,
+        action="store_true",
+    )
+
+    # distributed training parameters
+    parser.add_argument('--local_rank', default=-1, type=int,
+                        help='Local rank')
+    parser.add_argument('--world-size', default=1, type=int,
+                        help='number of distributed processes')
+    parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
+    parser.add_argument('--amp', action='store_true', help='Automatic Mixed Precision training')
+    parser.add_argument('--seed', default=42, type=int, help='Random seed')
+    return parser
+
+
+if __name__ == "__main__":
+    args = get_args_parser().parse_args()
+    main(args)
diff --git a/cv/classification/cspdarknet53/pytorch/utils_.py b/cv/classification/cspdarknet53/pytorch/utils_.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a25edf2a95b3c59987c640e71e91fd15cbecc89
--- /dev/null
+++ b/cv/classification/cspdarknet53/pytorch/utils_.py
@@ -0,0 +1,169 @@
+# Copyright (c) 2022, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from collections import OrderedDict
+import copy
+import hashlib
+import os
+
+import torch
+
+from common_utils import *
+
+
+def accuracy(output, target, topk=(1,)):
+    """Computes the accuracy over the k top predictions for the specified values of k"""
+    with torch.no_grad():
+        maxk = max(topk)
+        batch_size = target.size(0)
+
+        _, pred = output.topk(maxk, 1, True, True)
+        pred = pred.t()
+        correct = pred.eq(target[None])
+
+        res = []
+        for k in topk:
+            correct_k = correct[:k].flatten().sum(dtype=torch.float32)
+            res.append(correct_k * (100.0 / batch_size))
+        return res
+
+
+def average_checkpoints(inputs):
+    """Loads checkpoints from inputs and returns a model with averaged weights.
+    Original implementation taken from:
+    https://github.com/pytorch/fairseq/blob/a48f235636557b8d3bc4922a6fa90f3a0fa57955/scripts/average_checkpoints.py#L16
+
+    Args:
+        inputs (List[str]): An iterable of string paths of checkpoints to load from.
+    Returns:
+        A dict of string keys mapping to various values. The 'model' key
+        from the returned dict should correspond to an OrderedDict mapping
+        string parameter names to torch Tensors.
+    """
+    params_dict = OrderedDict()
+    params_keys = None
+    new_state = None
+    num_models = len(inputs)
+    for fpath in inputs:
+        with open(fpath, "rb") as f:
+            state = torch.load(
+                f,
+                map_location=(
+                    lambda s, _: torch.serialization.default_restore_location(s, "cpu")
+                ),
+            )
+        # Copies over the settings from the first checkpoint
+        if new_state is None:
+            new_state = state
+        model_params = state["model"]
+        model_params_keys = list(model_params.keys())
+        if params_keys is None:
+            params_keys = model_params_keys
+        elif params_keys != model_params_keys:
+            raise KeyError(
+                "For checkpoint {}, expected list of params: {}, "
+                "but found: {}".format(f, params_keys, model_params_keys)
+            )
+        for k in params_keys:
+            p = model_params[k]
+            if isinstance(p, torch.HalfTensor):
+                p = p.float()
+            if k not in params_dict:
+                params_dict[k] = p.clone()
+                # NOTE: clone() is needed in case p is a shared parameter
+            else:
+                params_dict[k] += p
+    averaged_params = OrderedDict()
+    for k, v in params_dict.items():
+        averaged_params[k] = v
+        if averaged_params[k].is_floating_point():
+            averaged_params[k].div_(num_models)
+        else:
+            averaged_params[k] //= num_models
+    new_state["model"] = averaged_params
+    return new_state
+
+
+def store_model_weights(model, checkpoint_path, checkpoint_key='model', strict=True):
+    """
+    This method can be used to prepare weights files for new models. It receives as
+    input a model architecture and a checkpoint from the training script and produces
+    a file with the weights ready for release.
+
+    Examples:
+        from torchvision import models as M
+
+        # Classification
+        model = M.mobilenet_v3_large(pretrained=False)
+        print(store_model_weights(model, './class.pth'))
+
+        # Quantized Classification
+        model = M.quantization.mobilenet_v3_large(pretrained=False, quantize=False)
+        model.fuse_model()
+        model.qconfig = torch.quantization.get_default_qat_qconfig('qnnpack')
+        _ = torch.quantization.prepare_qat(model, inplace=True)
+        print(store_model_weights(model, './qat.pth'))
+
+        # Object Detection
+        model = M.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, pretrained_backbone=False)
+        print(store_model_weights(model, './obj.pth'))
+
+        # Segmentation
+        model = M.segmentation.deeplabv3_mobilenet_v3_large(pretrained=False, pretrained_backbone=False, aux_loss=True)
+        print(store_model_weights(model, './segm.pth', strict=False))
+
+    Args:
+        model (pytorch.nn.Module): The model on which the weights will be loaded for validation purposes.
+        checkpoint_path (str): The path of the checkpoint we will load.
+        checkpoint_key (str, optional): The key of the checkpoint where the model weights are stored.
+            Default: "model".
+        strict (bool): whether to strictly enforce that the keys
+            in :attr:`state_dict` match the keys returned by this module's
+            :meth:`~torch.nn.Module.state_dict` function. Default: ``True``
+
+    Returns:
+        output_path (str): The location where the weights are saved.
+ """ + # Store the new model next to the checkpoint_path + checkpoint_path = os.path.abspath(checkpoint_path) + output_dir = os.path.dirname(checkpoint_path) + + # Deep copy to avoid side-effects on the model object. + model = copy.deepcopy(model) + checkpoint = torch.load(checkpoint_path, map_location='cpu') + + # Load the weights to the model to validate that everything works + # and remove unnecessary weights (such as auxiliaries, etc) + model.load_state_dict(checkpoint[checkpoint_key], strict=strict) + + tmp_path = os.path.join(output_dir, str(model.__hash__())) + torch.save(model.state_dict(), tmp_path) + + sha256_hash = hashlib.sha256() + with open(tmp_path, "rb") as f: + # Read and update hash string value in blocks of 4K + for byte_block in iter(lambda: f.read(4096), b""): + sha256_hash.update(byte_block) + hh = sha256_hash.hexdigest() + + output_path = os.path.join(output_dir, "weights-" + str(hh[:8]) + ".pth") + os.replace(tmp_path, output_path) + + return output_path