diff --git a/tests/st/hierarchical_memory/memory_ops/test_hiar_mem_ops.py b/tests/st/hierarchical_memory/memory_ops/test_hiar_mem_ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f4684e194e47c607fb64bbd50711c07dcec4d5a
--- /dev/null
+++ b/tests/st/hierarchical_memory/memory_ops/test_hiar_mem_ops.py
@@ -0,0 +1,197 @@
+# Copyright 2025 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Test hierarchical memory Ops"""
+
+import os
+import numpy as np
+import mindspore as ms
+from mindspore import mutable
+from mindspore import jit, ops, context
+from mindspore import Tensor, Parameter
+from mindspore.nn import Cell
+from mindspore.common.initializer import One
+
+from tests.mark_utils import arg_mark
+
+os.environ['DEVICE_ID'] = '0'
+context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', device_id=0)
+
+@arg_mark(plat_marks=['platform_ascend910b'], level_mark='level0', card_mark='onecard', essential_mark='essential')
+def test_hiar_addr_to_remote():
+    """
+    Feature: Remote memory base operator
+    Description: Base scene.
+    Expectation: No Exception.
+    """
+
+    @jit
+    def foo(x):
+        x = ops.auto_generate.UpdateToRemote()(x)
+        return x
+
+    x = Tensor([1, 2, 3, 4])
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.array((1, 2, 3, 4)))
+    assert ret.device == "Ascend:0"
+
+    x = Tensor(shape=(6,), dtype=ms.float32, init=One())
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.ones((6,)))
+    assert ret.device == "Ascend:0"
+
+    x = Tensor(shape=(6, 7), dtype=ms.float32, init=One())
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.ones((6, 7)))
+    assert ret.device == "Ascend:0"
+
+    x = Tensor(shape=(6, 7, 8), dtype=ms.float32, init=One())
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.ones((6, 7, 8)))
+    assert ret.device == "Ascend:0"
+
+
+@arg_mark(plat_marks=['platform_ascend910b'], level_mark='level0', card_mark='onecard', essential_mark='essential')
+def test_hiar_addr_update_to_device():
+    """
+    Feature: Remote memory base operator
+    Description: Base scene.
+    Expectation: No Exception.
+    """
+    os.environ['MS_DEV_HIERARCHICAL_MEMORY'] = '1'
+
+    @jit
+    def foo(x):
+        x = ops.auto_generate.UpdateToDevice()(x)
+        return x
+
+    x = Tensor([1, 2, 3, 4])
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.array((1, 2, 3, 4)))
+
+    x = Tensor(shape=(6,), dtype=ms.float32, init=One())
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.ones((6,)))
+    assert ret.device == "Ascend:0"
+
+    x = Tensor(shape=(6, 7), dtype=ms.float32, init=One())
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.ones((6, 7)))
+    assert ret.device == "Ascend:0"
+
+    x = Tensor(shape=(6, 7, 8), dtype=ms.float32, init=One())
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.ones((6, 7, 8)))
+    assert ret.device == "Ascend:0"
+
+    os.environ['MS_DEV_HIERARCHICAL_MEMORY'] = '0'
+
+
+@arg_mark(plat_marks=['platform_ascend910b'], level_mark='level0', card_mark='onecard', essential_mark='essential')
+def test_hiar_addr_detach():
+    """
+    Feature: Remote memory base operator
+    Description: Base scene.
+    Expectation: No Exception.
+    """
+    os.environ['MS_DEV_HIERARCHICAL_MEMORY'] = '1'
+
+    @jit
+    def foo(x):
+        x = ops.auto_generate.UpdateToRemote()(x)
+        x = ops.auto_generate.Detach()(x)
+        x = ops.auto_generate.Detach()(x)
+        return x
+
+    x = Tensor([1, 2, 3, 4])
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.array((1, 2, 3, 4)))
+
+    # Different tensor ranks
+    x = Tensor(shape=(6,), dtype=ms.float32, init=One())
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.ones((6,)))
+
+    x = Tensor(shape=(6, 7), dtype=ms.float32, init=One())
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.ones((6, 7)))
+
+    x = Tensor(shape=(6, 7, 8), dtype=ms.float32, init=One())
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.ones((6, 7, 8)))
+
+    os.environ['MS_DEV_HIERARCHICAL_MEMORY'] = '0'
+
+
+@arg_mark(plat_marks=['platform_ascend910b'], level_mark='level0', card_mark='onecard', essential_mark='essential')
+def test_hiar_addr_to_remote_and_detach():
+    """
+    Feature: Remote memory base operator
+    Description: Base scene.
+    Expectation: No Exception.
+    """
+    os.environ['MS_DEV_HIERARCHICAL_MEMORY'] = '1'
+
+    @jit
+    def foo(x):
+        ops.auto_generate.UpdateToRemote()(x)
+        ops.auto_generate.Detach()(x)
+        return x
+
+    x = Tensor([1, 2, 3, 4])
+    ret = foo(x)
+    assert np.all(ret.asnumpy() == np.array((1, 2, 3, 4)))
+    os.environ['MS_DEV_HIERARCHICAL_MEMORY'] = '0'
+
+
+@arg_mark(plat_marks=['platform_ascend910b'], level_mark='level0', card_mark='onecard', essential_mark='essential')
+def test_hiar_addr_in_for_variable_loop():
+    """
+    Feature: Remote memory base operator
+    Description: Base scene.
+    Expectation: No Exception.
+    """
+    os.environ['MS_DEV_HIERARCHICAL_MEMORY'] = '1'
+
+    class Net(Cell):
+        def __init__(self):
+            super().__init__()
+            self.param_a = Parameter(Tensor([1, 1, 1], dtype=ms.int32), name="param_a")
+            self.param_b = Parameter(Tensor([1, 1, 1], dtype=ms.int32), name="param_b")
+            self.param_c = Parameter(Tensor([1, 1, 1], dtype=ms.int32), name="param_c")
+            self.params = self.trainable_params()
+            self.prefetch = ops.auto_generate.UpdateToDevice()
+            self.depend = ops.Depend()
+            self.detach = ops.auto_generate.Detach()
+
+        @jit
+        def construct(self, a, b, c, d):
+            m = (a, b, c)
+            a = Tensor([0, 0, 0])
+            for i in range(d):
+                prefetch_result = self.prefetch(self.params[i], sync=False)
+                cur = self.depend(m[i], prefetch_result)
+                a = a + cur + self.params[i]
+                detach_result = self.detach(m[i], sync=False)
+                a = self.depend(a, detach_result)
+            return a
+
+    x = Tensor([1, 1, 1], dtype=ms.int32)
+    y = Tensor([1, 1, 1], dtype=ms.int32)
+    z = Tensor([1, 1, 1], dtype=ms.int32)
+    d = mutable(3)
+    net = Net()
+    ret = net(x, y, z, d)
+    assert np.all(ret.asnumpy() == np.array((6, 6, 6)))
+    os.environ['MS_DEV_HIERARCHICAL_MEMORY'] = '0'
diff --git a/tests/ut/cpp/ops/test_ops_detach.cc b/tests/ut/cpp/ops/test_ops_detach.cc
new file mode 100644
index 0000000000000000000000000000000000000000..8f26b5d4213e5a1e4a7fd3f726055789a92f40c2
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_detach.cc
@@ -0,0 +1,98 @@
+/**
+ * Copyright 2025 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <memory>
+#include <vector>
+#include "common/common_test.h"
+#include "ir/dtype/type.h"
+#include "ir/primitive.h"
+#include "utils/tensor_construct_utils.h"
+#include "abstract/dshape.h"
+#include "abstract/abstract_value.h"
+#include "ops/test_ops.h"
+#include "ops/test_ops_cmp_utils.h"
+#include "infer/ops_func_impl/detach.h"
+#include "ops/test_value_utils.h"
+
+
+namespace mindspore {
+namespace ops {
+
+// Input and expected-output parameters for one Detach infer test case.
+struct DetachParams {
+  // input
+  ShapeVector x_shape;
+  TypePtr x_type;
+  ValuePtr sync;
+  // output
+  ShapeVector output_shape;
+  TypePtr output_type;
+};
+
+class TestDetach : public TestOps, public testing::WithParamInterface<DetachParams> {};
+
+TEST_P(TestDetach, dyn_shape) {
+  // get params
+  const auto &param = GetParam();
+  auto x = std::make_shared<abstract::AbstractTensor>(param.x_type, param.x_shape);
+  auto sync = param.sync->ToAbstract();
+
+  // build expected struct of outputs (BaseShape, Type)
+  auto expect_shape =
+    std::make_shared<abstract::Shape>(param.output_shape);
+  auto expect_type =
+    std::make_shared<TensorType>(param.output_type);
+  DetachFuncImpl detach_func_impl;
+  auto prim = std::make_shared<Primitive>("Detach");
+
+  // Detach is identity-like: inferred type/shape must match the input exactly.
+  auto real_type = detach_func_impl.InferType(prim, {x, sync});
+  ASSERT_TRUE(*real_type == *expect_type);
+  if (*real_type == *expect_type) {
+    MS_LOG(INFO) << "real_type == expect_type";
+  }
+
+  auto real_shape = detach_func_impl.InferShape(prim, {x, sync});
+  ASSERT_TRUE(*real_shape == *expect_shape);
+  if (*real_shape == *expect_shape) {
+    MS_LOG(INFO) << "real_shape == expect_shape";
+  }
+}
+
+// Batch-run the test over ranks 1-5, both sync flags, and several dtypes.
+INSTANTIATE_TEST_CASE_P(
+  TestDetach, TestDetach,
+  testing::Values(
+    DetachParams{{1}, kFloat32, CreateScalar<bool>(true), {1}, kFloat32},
+    DetachParams{{6}, kFloat32, CreateScalar<bool>(true), {6}, kFloat32},
+    DetachParams{{6, 7}, kFloat32, CreateScalar<bool>(true), {6, 7}, kFloat32},
+    DetachParams{{6, 7, 8}, kFloat32, CreateScalar<bool>(true), {6, 7, 8}, kFloat32},
+    DetachParams{{6, 7, 8, 9}, kFloat32, CreateScalar<bool>(true), {6, 7, 8, 9}, kFloat32},
+    DetachParams{{6, 7, 8, 9, 10}, kFloat32, CreateScalar<bool>(true), {6, 7, 8, 9, 10}, kFloat32},
+
+    DetachParams{{1}, kFloat32, CreateScalar<bool>(false), {1}, kFloat32},
+    DetachParams{{6}, kFloat32, CreateScalar<bool>(false), {6}, kFloat32},
+    DetachParams{{6, 7}, kFloat32, CreateScalar<bool>(false), {6, 7}, kFloat32},
+    DetachParams{{6, 7, 8}, kFloat32, CreateScalar<bool>(false), {6, 7, 8}, kFloat32},
+    DetachParams{{6, 7, 8, 9}, kFloat32, CreateScalar<bool>(false), {6, 7, 8, 9}, kFloat32},
+    DetachParams{{6, 7, 8, 9, 10}, kFloat32, CreateScalar<bool>(false), {6, 7, 8, 9, 10}, kFloat32},
+
+    DetachParams{{6, 7, 8, 9}, kInt32, CreateScalar<bool>(true), {6, 7, 8, 9}, kInt32},
+    DetachParams{{6, 7, 8, 9}, kInt64, CreateScalar<bool>(false), {6, 7, 8, 9}, kInt64},
+    DetachParams{{6, 7, 8, 9}, kFloat16, CreateScalar<bool>(true), {6, 7, 8, 9}, kFloat16},
+    DetachParams{{6, 7, 8, 9}, kInt16, CreateScalar<bool>(true), {6, 7, 8, 9}, kInt16}
+  ));
+}  // namespace ops
+}  // namespace mindspore
diff --git a/tests/ut/cpp/ops/test_ops_update_to_device.cc b/tests/ut/cpp/ops/test_ops_update_to_device.cc
new file mode 100644
index 0000000000000000000000000000000000000000..645f0486522b77c3cfa6601b9c006138722f49e4
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_update_to_device.cc
@@ -0,0 +1,92 @@
+/**
+ * Copyright 2025 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <memory>
+#include <vector>
+#include "common/common_test.h"
+#include "ir/dtype/type.h"
+#include "ir/primitive.h"
+#include "utils/tensor_construct_utils.h"
+#include "abstract/dshape.h"
+#include "abstract/abstract_value.h"
+#include "ops/test_ops.h"
+#include "ops/test_ops_cmp_utils.h"
+#include "infer/ops_func_impl/update_to_device.h"
+#include "ops/test_value_utils.h"
+
+
+namespace mindspore {
+namespace ops {
+
+// Input and expected-output parameters for one UpdateToDevice infer test case.
+struct UpdateToDeviceParams {
+  // input
+  ShapeVector x_shape;
+  TypePtr x_type;
+  ValuePtr sync;
+  // output
+  ShapeVector output_shape;
+  TypePtr output_type;
+};
+
+class TestUpdateToDevice : public TestOps, public testing::WithParamInterface<UpdateToDeviceParams> {};
+
+TEST_P(TestUpdateToDevice, dyn_shape) {
+  // get params
+  const auto &param = GetParam();
+  auto x = std::make_shared<abstract::AbstractTensor>(param.x_type, param.x_shape);
+  auto sync = param.sync->ToAbstract();
+  auto expect_shape = std::make_shared<abstract::Shape>(param.output_shape);
+  auto expect_type = std::make_shared<TensorType>(param.output_type);
+
+  UpdateToDeviceFuncImpl prefetch_func_impl;
+  auto prim = std::make_shared<Primitive>("UpdateToDevice");
+
+  // UpdateToDevice is identity-like: inferred type/shape must match the input.
+  auto real_type = prefetch_func_impl.InferType(prim, {x, sync});
+  ASSERT_TRUE(*real_type == *expect_type);
+
+  if (*real_type == *expect_type) {
+    MS_LOG(INFO) << "real_type == expect_type";
+  }
+  auto real_shape = prefetch_func_impl.InferShape(prim, {x, sync});
+  ASSERT_TRUE(*real_shape == *expect_shape);
+  if (*real_shape == *expect_shape) {
+    MS_LOG(INFO) << "real_shape == expect_shape";
+  }
+}
+
+// Batch-run the test over ranks 1-5, both sync flags, and several dtypes.
+INSTANTIATE_TEST_CASE_P(
+  TestUpdateToDevice, TestUpdateToDevice,
+  testing::Values(
+    UpdateToDeviceParams{{1}, kFloat32, CreateScalar<bool>(true), {1}, kFloat32},
+    UpdateToDeviceParams{{6}, kFloat32, CreateScalar<bool>(true), {6}, kFloat32},
+    UpdateToDeviceParams{{6, 7}, kFloat32, CreateScalar<bool>(true), {6, 7}, kFloat32},
+    UpdateToDeviceParams{{6, 7, 8}, kFloat32, CreateScalar<bool>(true), {6, 7, 8}, kFloat32},
+    UpdateToDeviceParams{{6, 7, 8, 9}, kFloat32, CreateScalar<bool>(true), {6, 7, 8, 9}, kFloat32},
+    UpdateToDeviceParams{{6, 7, 8, 9, 10}, kFloat32, CreateScalar<bool>(true), {6, 7, 8, 9, 10}, kFloat32},
+
+    UpdateToDeviceParams{{1}, kFloat32, CreateScalar<bool>(false), {1}, kFloat32},
+    UpdateToDeviceParams{{6}, kFloat32, CreateScalar<bool>(false), {6}, kFloat32},
+    UpdateToDeviceParams{{6, 7}, kFloat32, CreateScalar<bool>(false), {6, 7}, kFloat32},
+    UpdateToDeviceParams{{6, 7, 8}, kFloat32, CreateScalar<bool>(false), {6, 7, 8}, kFloat32},
+    UpdateToDeviceParams{{6, 7, 8, 9}, kFloat32, CreateScalar<bool>(false), {6, 7, 8, 9}, kFloat32},
+    UpdateToDeviceParams{{6, 7, 8, 9, 10}, kFloat32, CreateScalar<bool>(false), {6, 7, 8, 9, 10}, kFloat32},
+
+    UpdateToDeviceParams{{6, 7, 8, 9}, kInt32, CreateScalar<bool>(true), {6, 7, 8, 9}, kInt32},
+    UpdateToDeviceParams{{6, 7, 8, 9}, kInt64, CreateScalar<bool>(false), {6, 7, 8, 9}, kInt64},
+    UpdateToDeviceParams{{6, 7, 8, 9}, kInt16, CreateScalar<bool>(true), {6, 7, 8, 9}, kInt16}
+  ));
+
+}  // namespace ops
+}  // namespace mindspore
diff --git a/tests/ut/cpp/ops/test_ops_update_to_remote.cc b/tests/ut/cpp/ops/test_ops_update_to_remote.cc
new file mode 100644
index 0000000000000000000000000000000000000000..b9ced47564d3c297d4421ccfcc045a27635221ab
--- /dev/null
+++ b/tests/ut/cpp/ops/test_ops_update_to_remote.cc
@@ -0,0 +1,92 @@
+/**
+ * Copyright 2025 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <memory>
+#include <vector>
+#include "common/common_test.h"
+#include "ir/dtype/type.h"
+#include "ir/primitive.h"
+#include "utils/tensor_construct_utils.h"
+#include "abstract/dshape.h"
+#include "abstract/abstract_value.h"
+#include "ops/test_ops.h"
+#include "ops/test_ops_cmp_utils.h"
+#include "infer/ops_func_impl/update_to_remote.h"
+#include "ops/test_value_utils.h"
+
+namespace mindspore {
+namespace ops {
+// Input and expected-output parameters for one UpdateToRemote infer test case.
+struct UpdateToRemoteParams {
+  // INPUT
+  ShapeVector x_shape;
+  TypePtr x_type;
+  ValuePtr sync;
+  // OUTPUT
+  ShapeVector output_shape;
+  TypePtr output_type;
+};
+
+class TestUpdateToRemote : public TestOps, public testing::WithParamInterface<UpdateToRemoteParams> {};
+
+TEST_P(TestUpdateToRemote, dyn_shape) {
+  const auto &param = GetParam();
+  auto x = std::make_shared<abstract::AbstractTensor>(param.x_type, param.x_shape);
+  auto sync = param.sync->ToAbstract();
+
+  auto expect_shape = std::make_shared<abstract::Shape>(param.output_shape);
+  auto expect_type = std::make_shared<TensorType>(param.output_type);
+
+  UpdateToRemoteFuncImpl to_remote_func_impl;
+  auto prim = std::make_shared<Primitive>("UpdateToRemote");
+
+  // UpdateToRemote is identity-like: inferred type/shape must match the input.
+  auto real_dtype = to_remote_func_impl.InferType(prim, {x, sync});
+  ASSERT_TRUE(*real_dtype == *expect_type);
+  if (*real_dtype == *expect_type) {
+    MS_LOG(INFO) << "real_dtype == expect_type";
+  }
+
+  auto real_shape = to_remote_func_impl.InferShape(prim, {x, sync});
+  ASSERT_TRUE(*real_shape == *expect_shape);
+
+  if (*real_shape == *expect_shape) {
+    MS_LOG(INFO) << "real_shape == expect_shape";
+  }
+}
+
+// Batch-run the test over ranks 1-5, both sync flags, and several dtypes.
+INSTANTIATE_TEST_CASE_P(
+  TestUpdateToRemote, TestUpdateToRemote,
+  testing::Values(
+    UpdateToRemoteParams{{1}, kFloat32, CreateScalar<bool>(true), {1}, kFloat32},
+    UpdateToRemoteParams{{6}, kFloat32, CreateScalar<bool>(true), {6}, kFloat32},
+    UpdateToRemoteParams{{6, 7}, kFloat32, CreateScalar<bool>(true), {6, 7}, kFloat32},
+    UpdateToRemoteParams{{6, 7, 8}, kFloat32, CreateScalar<bool>(true), {6, 7, 8}, kFloat32},
+    UpdateToRemoteParams{{6, 7, 8, 9}, kFloat32, CreateScalar<bool>(true), {6, 7, 8, 9}, kFloat32},
+    UpdateToRemoteParams{{6, 7, 8, 9, 10}, kFloat32, CreateScalar<bool>(true), {6, 7, 8, 9, 10}, kFloat32},
+
+    UpdateToRemoteParams{{1}, kFloat32, CreateScalar<bool>(false), {1}, kFloat32},
+    UpdateToRemoteParams{{6}, kFloat32, CreateScalar<bool>(false), {6}, kFloat32},
+    UpdateToRemoteParams{{6, 7}, kFloat32, CreateScalar<bool>(false), {6, 7}, kFloat32},
+    UpdateToRemoteParams{{6, 7, 8}, kFloat32, CreateScalar<bool>(false), {6, 7, 8}, kFloat32},
+    UpdateToRemoteParams{{6, 7, 8, 9}, kFloat32, CreateScalar<bool>(false), {6, 7, 8, 9}, kFloat32},
+    UpdateToRemoteParams{{6, 7, 8, 9, 10}, kFloat32, CreateScalar<bool>(false), {6, 7, 8, 9, 10}, kFloat32},
+
+    UpdateToRemoteParams{{6, 7, 8, 9}, kFloat16, CreateScalar<bool>(true), {6, 7, 8, 9}, kFloat16},
+    UpdateToRemoteParams{{6, 7, 8, 9}, kFloat64, CreateScalar<bool>(false), {6, 7, 8, 9}, kFloat64},
+    UpdateToRemoteParams{{6, 7, 8, 9}, kInt16, CreateScalar<bool>(true), {6, 7, 8, 9}, kInt16},
+    UpdateToRemoteParams{{6, 7, 8, 9}, kInt32, CreateScalar<bool>(true), {6, 7, 8, 9}, kInt32},
+    UpdateToRemoteParams{{6, 7, 8, 9}, kInt64, CreateScalar<bool>(true), {6, 7, 8, 9}, kInt64}
+  ));
+}  // namespace ops
+}  // namespace mindspore