# run_gpt2_13b.yaml
# Source: https://gitee.com/mindspore/mindformers.git (branch r1.3.0)

seed: 0
run_mode: 'train'
output_dir: './output' # path to save checkpoint/strategy
load_checkpoint: ""
src_strategy_path_or_dir: ''
auto_trans_ckpt: False # If True, automatically transform the loaded checkpoint to match the distributed parallel strategy
only_save_strategy: False
resume_training: False
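
# Note: with load_checkpoint left empty and resume_training: False, training starts from
# randomly initialized weights; when a checkpoint path is supplied, auto_trans_ckpt: True
# re-shards that checkpoint to match the distributed parallel strategy configured below.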

# context
context:
  jit_config:
    jit_level: "O2" # GE
  mode: 0 # 0--Graph Mode; 1--Pynative Mode
  device_target: "Ascend"
  enable_graph_kernel: False
  max_call_depth: 10000
  max_device_memory: "30GB"
  save_graphs: False
  save_graphs_path: "./graph"
  device_id: 0

# aicc
remote_save_url: "Please input obs url on AICC platform."
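
# Note: per the inline comments above, mode: 0 keeps the network in graph mode and
# jit_level "O2" targets the GE backend on Ascend; max_device_memory caps the per-device
# memory pool at 30GB.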

# runner
runner_config:
  epochs: 2
  batch_size: 2
  sink_mode: True
  sink_size: 2
runner_wrapper:
  type: MFTrainOneStepCell
  scale_sense:
    type: DynamicLossScaleUpdateCell
    loss_scale_value: 4294967296
    scale_factor: 2
    scale_window: 1000
  use_clip_grad: True
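
# Note: DynamicLossScaleUpdateCell implements dynamic loss scaling for the float16 compute
# dtype used below: the scale starts at loss_scale_value (4294967296 = 2**32), is divided
# by scale_factor (2) whenever a gradient overflow is detected, and is multiplied by
# scale_factor again after scale_window (1000) consecutive overflow-free steps.
# use_clip_grad: True additionally enables gradient clipping in MFTrainOneStepCell.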

# parallel
use_parallel: True
parallel:
  parallel_mode: 1 # 0-data parallel, 1-semi-auto parallel, 2-auto parallel, 3-hybrid parallel
  gradients_mean: False
  loss_repeated_mean: True
  full_batch: True
  search_mode: "sharding_propagation"
  enable_parallel_optimizer: True
  strategy_ckpt_save_file: "./ckpt_strategy.ckpt"
parallel_config:
  data_parallel: 4
  model_parallel: 8
  pipeline_stage: 1
  use_seq_parallel: False
  micro_batch_num: 1
  vocab_emb_dp: True
  gradient_aggregation_group: 4
micro_batch_interleave_num: 1
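
# Note: this sharding expects data_parallel * model_parallel * pipeline_stage
# = 4 * 8 * 1 = 32 devices; with runner_config.batch_size: 2 per data-parallel group and
# micro_batch_num: 1, the global batch size per step is roughly 2 * 4 = 8 samples.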

# moe
moe_config:
  expert_num: 1
  capacity_factor: 1.05
  aux_loss_factor: 0.05
  num_experts_chosen: 1

# recompute
recompute_config:
  recompute: True
  select_recompute: False
  parallel_optimizer_comm_recompute: False
  mp_comm_recompute: True
  recompute_slice_activation: False
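
# Note: recompute: True enables full activation recomputation, re-running parts of the
# forward pass during backpropagation to cut activation memory at the cost of extra
# compute; mp_comm_recompute: True also recomputes model-parallel communication operators.
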
# autotune
auto_tune: True
filepath_prefix: './autotune'
autotune_per_step: 10
# profile
profile: False
profile_start_step: 1
profile_stop_step: 10
init_start_profile: True
profile_communication: True
profile_memory: True

# Trainer
trainer:
  type: CausalLanguageModelingTrainer
  model_name: 'gpt2_13b'
# If True, run evaluation during the training process; if False, do nothing.
# Note that the task trainer must support the _evaluate_in_training function.
do_eval: False

# train dataset
train_dataset: &train_dataset
  data_loader:
    type: MindDataset
    dataset_dir: ""
    shuffle: True
  input_columns: ["input_ids", "attention_mask"]
  num_parallel_workers: 8
  python_multiprocessing: False
  drop_remainder: True
  batch_size: 8
  repeat: 1
  numa_enable: False
  prefetch_size: 1
train_dataset_task:
  type: CausalLanguageModelDataset
  dataset_config: *train_dataset
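
# Note: "&train_dataset" defines a YAML anchor on the block above and "*train_dataset"
# re-uses it, so train_dataset_task.dataset_config receives exactly the same data_loader,
# batch size and worker settings without duplicating them (the same pattern is used for
# the eval dataset below).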

# eval dataset
eval_dataset: &eval_dataset
  data_loader:
    type: MindDataset
    dataset_dir: ""
    shuffle: False
  input_columns: ["input_ids", "attention_mask"]
  num_parallel_workers: 8
  python_multiprocessing: False
  drop_remainder: False
  repeat: 1
  numa_enable: False
  prefetch_size: 1
eval_dataset_task:
  type: CausalLanguageModelDataset
  dataset_config: *eval_dataset

# model
model:
  model_config:
    batch_size: 1
    type: GPT2Config
    max_position_embeddings: 2048
    seq_length: 2048
    vocab_size: 50257
    hidden_size: 5120
    num_layers: 40
    num_heads: 40
    expand_ratio: 4
    hidden_act: "fast_gelu"
    use_flash_attention: False
    use_prompt_flash_attention: False
    hidden_dropout_rate: 0.1
    attention_dropout_rate: 0.1
    param_init_type: "float16"
    layernorm_compute_type: "float32"
    softmax_compute_type: "float16"
    compute_dtype: "float16"
    checkpoint_name_or_path: ""
    eos_token_id: 50256
    repetition_penalty: 1
    max_decode_length: 2048
    top_k: 5
    top_p: 1
    do_sample: True
    use_past: False
  arch:
    type: GPT2LMHeadModel
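
# Note (rough estimate): with hidden_size 5120, num_layers 40 and expand_ratio 4, each
# transformer block holds about 12 * 5120^2 ≈ 0.31B parameters (attention + MLP), so
# 40 layers give roughly 12.6B, plus about 50257 * 5120 ≈ 0.26B embedding parameters,
# i.e. approximately 13B in total, matching the 'gpt2_13b' model name.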

# lr schedule
lr_schedule:
  type: polynomial
  learning_rate: 0.00005
  lr_end: 0.000001
  warmup_steps: 2000
  total_steps: -1 # -1 means it will load the total steps of the dataset
layer_scale: False
layer_decay: 0.65
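
# Note: the polynomial schedule warms the learning rate up to 0.00005 over the first
# 2000 steps and then decays it toward lr_end 0.000001; total_steps: -1 derives the decay
# horizon from the dataset size, as the inline comment states.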

# optimizer
optimizer:
  type: FusedAdamWeightDecay
  beta1: 0.9
  beta2: 0.95
  eps: 0.00000001 # 1e-8
  weight_decay: 0.1
lr_scale: False
lr_scale_factor: 256
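
# Note: FusedAdamWeightDecay is an AdamW-style optimizer with decoupled weight decay (0.1)
# and betas (0.9, 0.95), a common setting for large language model pre-training;
# lr_scale: False disables rescaling, so lr_scale_factor is presumably unused here.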

# callbacks
callbacks:
  - type: MFLossMonitor
  - type: CheckpointMonitor
    prefix: "gpt2-13b"
    save_checkpoint_steps: 500
    integrated_save: False
    async_save: False
  - type: ObsMonitor
eval_callbacks:
  - type: ObsMonitor

# metric
metric:
  type: PerplexityMetric

# processor
processor:
  return_tensors: ms
  tokenizer:
    unk_token: '<|endoftext|>'
    bos_token: '<|endoftext|>'
    eos_token: '<|endoftext|>'
    pad_token: '<|endoftext|>'
    type: GPT2Tokenizer
  type: GPT2Processor
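
# Usage sketch (assumed invocation; check the repository README for the exact command):
# this file is typically passed to the MindFormers entry script, e.g.
#   python run_mindformer.py --config configs/gpt2/run_gpt2_13b.yaml --run_mode train
# after filling train_dataset.data_loader.dataset_dir with the MindRecord path and
# launching across the 32 devices expected by parallel_config above.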