diff --git a/PyTorch/built-in/mm/CLIP_for_PyTorch/run_clip.py b/PyTorch/built-in/mm/CLIP_for_PyTorch/run_clip.py
index 3375cb41b7143d6515fa9a6694350fec80d45f80..e0e325c48a65d5226d7b107c1176ee6319aad864 100644
--- a/PyTorch/built-in/mm/CLIP_for_PyTorch/run_clip.py
+++ b/PyTorch/built-in/mm/CLIP_for_PyTorch/run_clip.py
@@ -53,6 +53,8 @@ from transformers.trainer_utils import get_last_checkpoint
 from transformers.utils import check_min_version
 from transformers.utils.versions import require_version
 
+torch_npu.npu.config.allow_internal_format = False
+
 logger = logging.getLogger(__name__)
 
 # Will error if the minimal version of Transformers is not installed. Remove at your own risks.
diff --git a/PyTorch/built-in/mm/CLIP_for_PyTorch/transformers/src/transformers/models/roberta/modeling_roberta.py b/PyTorch/built-in/mm/CLIP_for_PyTorch/transformers/src/transformers/models/roberta/modeling_roberta.py
index 1741476cf3cb1221fa9a10a729d9d5c6e1701386..df8f0b115cbbf93e46bc2e98455272e4ec92df89 100644
--- a/PyTorch/built-in/mm/CLIP_for_PyTorch/transformers/src/transformers/models/roberta/modeling_roberta.py
+++ b/PyTorch/built-in/mm/CLIP_for_PyTorch/transformers/src/transformers/models/roberta/modeling_roberta.py
@@ -867,7 +867,7 @@ class RobertaModel(RobertaPreTrainedModel):
             output_hidden_states=output_hidden_states,
             return_dict=return_dict,
         )
-        sequence_output = encoder_outputs[0].view(bs, from_seq_len, -1)
+        sequence_output = encoder_outputs[0].view(bs, from_seq_len, -1).clone()
         pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
 
         if not return_dict: