diff --git a/test/npu/test_npu.py b/test/npu/test_npu.py
index 1cb60774234954f52b4cc04cfd53a2c7260f6049..cc0d73b070213ff00935a13d3c8a9ac3ca8d1d1b 100644
--- a/test/npu/test_npu.py
+++ b/test/npu/test_npu.py
@@ -722,5 +722,34 @@ class TestNpu(TestCase):
         npu_allocator_name = torch.npu.get_allocator_backend()
         self.assertEqual(npu_allocator_name, "native")
+    def test_contiguous(self):
+        def run_once():
+            x = torch.randn(4, 3, 8, 8).npu()
+            x = x.permute(0, 2, 1, 3)
+            x = x.contiguous()
+            return x
+
+        with torch._subclasses.fake_tensor.FakeTensorMode():
+            y = run_once()
+            self.assertTrue(y.is_contiguous())
+
+        x = torch.randn(1, 16, 5, 5).npu()
+        self.assertTrue(x.is_contiguous())
+        stride = list(x.stride())
+        stride[0] = 20
+        # change the stride in dimension 0. the tensor is still contiguous because size[0] is 1
+        x.set_(x.storage(), 0, x.size(), stride)
+        self.assertTrue(x.is_contiguous())
+
+        x.contiguous(memory_format=torch.contiguous_format)
+        x.contiguous(memory_format=torch.preserve_format)
+
+        with self.assertRaisesRegex(RuntimeError, "ERR01007 OPS feature not supported"):
+            x.contiguous(memory_format=torch.channels_last)
+
+        with self.assertRaisesRegex(RuntimeError, "ERR01007 OPS feature not supported"):
+            x.contiguous(memory_format=torch.channels_last_3d)
+
+
 
 if __name__ == '__main__':
     run_tests()
diff --git a/torch_npu/csrc/aten/common/TensorProperties.cpp b/torch_npu/csrc/aten/common/TensorProperties.cpp
index 78c3dafa0eb4278c6de7dc61567d329104c99f4b..13109f7f64698bb641b89cb31c1cc86d420108a4 100644
--- a/torch_npu/csrc/aten/common/TensorProperties.cpp
+++ b/torch_npu/csrc/aten/common/TensorProperties.cpp
@@ -8,30 +8,30 @@ namespace at_npu {
 namespace native {
 
 at::Tensor NPUNativeFunctions::contiguous(const at::Tensor& self, c10::MemoryFormat memory_format) {
-  if (self.is_contiguous(memory_format)) {
-    return self;
-  }
+    if (self.is_contiguous(memory_format)) {
+        return self;
+    }
 
-  TORCH_CHECK(
-      memory_format == c10::MemoryFormat::Contiguous,
-      "NPU contiguous operator only supportted contiguous memory format.", OPS_ERROR(ErrCode::NOT_SUPPORT));
-  return self.clone();
+    TORCH_CHECK(
+        memory_format == c10::MemoryFormat::Contiguous,
+        "NPU contiguous operator only supportted contiguous memory format.", OPS_ERROR(ErrCode::NOT_SUPPORT));
+    return self.clone(memory_format);
 }
 
 bool NPUNativeFunctions::is_set_to(const at::Tensor& self, const at::Tensor& src) {
-  if (self.storage().unsafeGetStorageImpl() == src.storage().unsafeGetStorageImpl() &&
-      self.storage_offset() == src.storage_offset() && self.dim() == src.dim() &&
-      NPUNativeFunctions::get_storage_size(self) == NPUNativeFunctions::get_storage_size(src) &&
-      NPUNativeFunctions::get_npu_format(self) == NPUNativeFunctions::get_npu_format(src)) {
-    for (const auto d : c10::irange(self.dim())) {
-      if (self.size(d) != src.size(d) || self.stride(d) != src.stride(d)) {
-        return false;
-      }
+    if (self.storage().unsafeGetStorageImpl() == src.storage().unsafeGetStorageImpl() &&
+        self.storage_offset() == src.storage_offset() && self.dim() == src.dim() &&
+        NPUNativeFunctions::get_storage_size(self) == NPUNativeFunctions::get_storage_size(src) &&
+        NPUNativeFunctions::get_npu_format(self) == NPUNativeFunctions::get_npu_format(src)) {
+        for (const auto d : c10::irange(self.dim())) {
+            if (self.size(d) != src.size(d) || self.stride(d) != src.stride(d)) {
+                return false;
+            }
+        }
+        return true;
     }
-    return true;
+    return false;
 }
-  return false;
-}
 
 } // namespace native
 } // namespace at_npu