seed: 0
output_dir: './output' # path to save checkpoint/strategy
load_checkpoint: './internlm.ckpt'
auto_trans_ckpt: False # If True, automatically transform load_checkpoint to match the distributed parallel strategy
only_save_strategy: False
resume_training: False
use_parallel: False
run_mode: 'predict'
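# Usage sketch (hedged; exact flags depend on your MindFormers version, and
# file names here are placeholders): with use_parallel: False this runs on a
# single device, while setting it to True plus a distributed launch activates
# the parallel_config below. A typical single-device call via the standard
# run_mindformer.py entry point:
#   python run_mindformer.py --config predict_internlm_20b.yaml \
#     --predict_data 'hello world'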
# trainer config
trainer:
  type: CausalLanguageModelingTrainer
  model_name: 'internlm_20b'
# runner config
runner_config:
  epochs: 1
  batch_size: 1
  sink_mode: True
  sink_size: 2
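  # sink_mode feeds batches to the device through MindSpore's data-sinking
  # queue, sink_size steps at a time; it mainly matters for train/eval runs
  # and is effectively inert for run_mode: 'predict'.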
# eval dataset
eval_dataset: &eval_dataset
  data_loader:
    type: MindDataset
    dataset_dir: ""
    shuffle: False
  input_columns: ["input_ids"]
  num_parallel_workers: 8
  python_multiprocessing: False
  drop_remainder: False
  repeat: 1
  numa_enable: False
  prefetch_size: 1
eval_dataset_task:
  type: CausalLanguageModelDataset
  dataset_config: *eval_dataset
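# `&eval_dataset` above defines a YAML anchor and `*eval_dataset` reuses it,
# so eval_dataset_task sees the exact same data_loader settings without
# duplicating them.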
# default parallel settings for one 8-device Atlas 800T A2 node
parallel_config:
  data_parallel: 1
  model_parallel: 8
  pipeline_stage: 1
  micro_batch_num: 1
  vocab_emb_dp: True
  gradient_aggregation_group: 4
# when model_parallel is greater than 1, setting micro_batch_interleave_num: 2 may accelerate training.
micro_batch_interleave_num: 1
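# Sanity check: data_parallel * model_parallel * pipeline_stage
# (1 * 8 * 1 = 8) must equal the number of devices, matching the 8-NPU
# Atlas 800T A2 node assumed above.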
# recompute config
recompute_config:
  recompute: True
  parallel_optimizer_comm_recompute: False
  mp_comm_recompute: True
  recompute_slice_activation: True
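# Recomputation trades compute for memory by re-running forward activations
# during backprop; it only takes effect in training, so it is harmless to
# leave enabled in this predict config.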
# callbacks
callbacks:
  - type: MFLossMonitor
  - type: CheckpointMonitor
    prefix: "internlm_20b"
    save_checkpoint_steps: 500
    keep_checkpoint_max: 3
    integrated_save: False
    async_save: False
  - type: ObsMonitor
# mindspore context init config
context:
  mode: 0 # 0--Graph Mode; 1--Pynative Mode
  device_target: "Ascend"
  enable_graph_kernel: False
  max_call_depth: 10000
  max_device_memory: "59GB"
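  # 59GB leaves headroom for runtime buffers out of the 64GB HBM an Atlas
  # 800T A2 NPU typically carries (an assumption; adjust to your hardware).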
  save_graphs: False
  save_graphs_path: "./graph"
  device_id: 0
# parallel context config
parallel:
  parallel_mode: 1 # 0-dataset, 1-semi, 2-auto, 3-hybrid
  gradients_mean: False
  enable_alltoall: False
  full_batch: True
  search_mode: "sharding_propagation"
  enable_parallel_optimizer: False
  strategy_ckpt_save_file: "./ckpt_strategy.ckpt"
  parallel_optimizer_config:
    gradient_accumulation_shard: False
    parallel_optimizer_threshold: 64
# model config
model:
  model_config:
    type: InternLMConfig
    batch_size: 1 # batch size used at prediction time
    seq_length: 2048
    hidden_size: 5120
    num_layers: 60
    num_heads: 40
    vocab_size: 103168
    multiple_of: 256
    rms_norm_eps: 1.0e-5
    bos_token_id: 1
    eos_token_id: 2
    pad_token_id: 2
    ignore_token_id: -100
    compute_dtype: "float16"
    layernorm_compute_type: "float16"
    softmax_compute_type: "float16"
    rotary_dtype: "float16"
    param_init_type: "float16"
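    # Rough memory arithmetic (an estimate, not from the original config):
    # ~20B params * 2 bytes (float16) ~= 40GB of weights, i.e. ~5GB per NPU
    # once sharded across model_parallel: 8.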
    has_bias: False
    use_past: True
    use_flash_attention: True
    block_size: 16
    num_blocks: 512
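    # use_past enables incremental (KV-cached) decoding; block_size/num_blocks
    # size the paged KV cache: 16 * 512 = 8192 token slots, comfortably above
    # seq_length (2048) + max_decode_length (512) for batch_size 1.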
    is_dynamic: True
    scaling_factor: 1.0
    extend_method: "None"
    offset: 0
    checkpoint_name_or_path: "internlm_20b"
    repetition_penalty: 1.0
    max_decode_length: 512
    top_k: 3
    top_p: 0.8
    do_sample: False
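    # With do_sample: False decoding is greedy, so top_k and top_p are kept
    # only as defaults; they take effect when do_sample is switched to True.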
    auto_map:
      AutoModel: internlm.InternLMForCausalLM
      AutoConfig: internlm_config.InternLMConfig
      AutoTokenizer: [internlm_tokenizer.InternLMTokenizer, null]
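    # auto_map points the Auto* factories at the local python modules shipped
    # alongside this config, so the custom InternLM classes can be resolved
    # by name (behaviour analogous to Hugging Face's trust_remote_code map).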
  arch:
    type: InternLMForCausalLM
processor:
  return_tensors: ms
  tokenizer:
    unk_token: '<unk>'
    bos_token: '<s>'
    eos_token: '</s>'
    pad_token: '</s>'
    type: InternLMTokenizer
    vocab_file: './internlm-20b-chat/tokenizer.model'
  type: LlamaProcessor
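# InternLM reuses the Llama-style processor pipeline here; only the
# sentencepiece tokenizer class and vocab file are InternLM-specific.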
# metric
metric:
  type: PerplexityMetric
eval_callbacks:
  - type: ObsMonitor
auto_tune: False
filepath_prefix: './autotune'
autotune_per_step: 10
profile: False
profile_start_step: 1
profile_stop_step: 10
init_start_profile: False
profile_communication: False
profile_memory: True
# aicc
remote_save_url: "Please input obs url on AICC platform."
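# remote_save_url is only consumed when running on the AICC cloud platform,
# where ObsMonitor syncs outputs to the given OBS bucket; the placeholder
# above can stay as-is for local runs.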