Traceback (most recent call last):
File "/root/miniconda3/envs/test/lib/python3.8/site-packages/swift/cli/deploy.py", line 5, in <module>
deploy_main()
File "/root/miniconda3/envs/test/lib/python3.8/site-packages/swift/utils/run_utils.py", line 27, in x_main
result = llm_x(args, **kwargs)
File "/root/miniconda3/envs/test/lib/python3.8/site-packages/swift/llm/deploy.py", line 543, in llm_deploy
model, template = prepare_model_template(args)
File "/root/miniconda3/envs/test/lib/python3.8/site-packages/swift/llm/infer.py", line 182, in prepare_model_template
model, tokenizer = get_model_tokenizer(
File "/root/miniconda3/envs/test/lib/python3.8/site-packages/swift/llm/utils/model.py", line 5572, in get_model_tokenizer
model, tokenizer = get_function(model_dir, torch_dtype, model_kwargs, load_model, **kwargs)
File "/root/miniconda3/envs/test/lib/python3.8/site-packages/swift/llm/utils/model.py", line 4766, in get_model_tokenizer_phi
return get_model_tokenizer_from_repo(
File "/root/miniconda3/envs/test/lib/python3.8/site-packages/swift/llm/utils/model.py", line 947, in get_model_tokenizer_from_repo
model = automodel_class.from_pretrained(
File "/root/miniconda3/envs/test/lib/python3.8/site-packages/modelscope/utils/hf_util.py", line 113, in from_pretrained
module_obj = module_class.from_pretrained(model_dir, *model_args,
File "/root/miniconda3/envs/test/lib/python3.8/site-packages/transformers/models/auto/auto_factory.py", line 556, in from_pretrained
return model_class.from_pretrained(
File "/root/miniconda3/envs/test/lib/python3.8/site-packages/modelscope/utils/hf_util.py", line 76, in from_pretrained
return ori_from_pretrained(cls, model_dir, *model_args, **kwargs)
File "/root/miniconda3/envs/test/lib/python3.8/site-packages/transformers/modeling_utils.py", line 3375, in from_pretrained
model = cls(config, *model_args, **model_kwargs)
File "/root/.cache/huggingface/modules/transformers_modules/TeleChat-12B/modeling_telechat.py", line 743, in __init__
self.transformer = TelechatModel(config)
File "/root/.cache/huggingface/modules/transformers_modules/TeleChat-12B/modeling_telechat.py", line 603, in __init__
self.h = nn.ModuleList([TelechatBlock(config, _) for _ in range(config.num_hidden_layers)])
File "/root/.cache/huggingface/modules/transformers_modules/TeleChat-12B/modeling_telechat.py", line 603, in <listcomp>
self.h = nn.ModuleList([TelechatBlock(config, _) for _ in range(config.num_hidden_layers)])
File "/root/.cache/huggingface/modules/transformers_modules/TeleChat-12B/modeling_telechat.py", line 512, in __init__
self.self_attention = TelechatAttention(config, layer_idx)
File "/root/.cache/huggingface/modules/transformers_modules/TeleChat-12B/modeling_telechat.py", line 357, in __init__
self.core_attention_flash = FlashSelfAttention(
File "/root/.cache/huggingface/modules/transformers_modules/TeleChat-12B/modeling_telechat.py", line 167, in __init__
assert flash_attn_unpadded_func is not None, ('Please install FlashAttention first, '
AssertionError: Please install FlashAttention first, e.g., with pip install flash-attn
pip安装后显示cuda相关信息未找到。
flash-attn安装需要cuda
目前cann不支持吗?
此处可能存在不合适展示的内容,页面不予展示。您可通过相关编辑功能自查并修改。
如您确认内容无涉及 不当用语 / 纯广告导流 / 暴力 / 低俗色情 / 侵权 / 盗版 / 虚假 / 无价值内容或违法国家有关法律法规的内容,可点击提交进行申诉,我们将尽快为您处理。
登录 后才可以发表评论