From 58738030e5daa0a1a27f19b87168ca7829616216 Mon Sep 17 00:00:00 2001
From: chendanyang
Date: Sun, 30 Nov 2025 12:46:04 +0800
Subject: [PATCH] refactor_deephe3nn

---
 .jenkins/check/config/whitelizard.txt | 2 +-
 MindChem/README_CN.md | 6 +-
 MindChem/applications/deephe3nn/README.md | 28 +-
 MindChem/applications/deephe3nn/README_EN.md | 28 +-
 MindChem/applications/deephe3nn/data/data.py | 2 +-
 MindChem/applications/deephe3nn/data/graph.py | 11 +-
 .../{mindchemistry => }/graph/graph.py | 46 +-
 .../{mindchemistry => }/graph/loss.py | 2 +-
 .../deephe3nn/mindchemistry/__init__.py | 64 --
 .../deephe3nn/mindchemistry/cell/__init__.py | 32 -
 .../mindchemistry/cell/activation.py | 38 --
 .../mindchemistry/cell/basic_block.py | 600 ------------------
 .../mindchemistry/cell/convolution.py | 120 ----
 .../mindchemistry/cell/deephe3nn/__init__.py | 20 -
 .../deephe3nn/mindchemistry/cell/embedding.py | 165 -----
 .../mindchemistry/cell/message_passing.py | 165 -----
 .../deephe3nn/mindchemistry/graph/__init__.py | 15 -
 .../mindchemistry/graph/dataloader.py | 408 ------------
 .../mindchemistry/graph/normlization.py | 278 --------
 .../deephe3nn/mindchemistry/utils/__init__.py | 18 -
 .../mindchemistry/utils/check_func.py | 128 ----
 .../mindchemistry/utils/load_config.py | 85 ---
 .../cell/deephe3nn => models}/e3modules.py | 9 +-
 .../applications/deephe3nn/models/kernel.py | 13 +-
 .../cell/deephe3nn => models}/model.py | 19 +-
 .../deephe3nn/models/parse_configs.py | 3 +-
 .../cell/deephe3nn => models}/utils.py | 143 +----
 .../matformer/mindchemistry/so2_conv/so2.py | 16 -
 mindscience/e3nn/README.md | 361 +++++++++++
 .../e3nn.md => mindscience/e3nn/README_en.md | 42 ++
 .../e3nn}/so2_conv/__init__.py | 4 +-
 .../e3nn}/so2_conv/init_edge_rot_mat.py | 33 +-
 .../e3nn}/so2_conv/jd.pkl | Bin
 .../e3nn}/so2_conv/so2.py | 0
 .../e3nn}/so2_conv/so3.py | 0
 .../e3nn}/so2_conv/wigner.py | 20 +-
 36 files changed, 515 insertions(+), 2409 deletions(-)
 rename MindChem/applications/deephe3nn/{mindchemistry => }/graph/graph.py (83%)
 rename MindChem/applications/deephe3nn/{mindchemistry => }/graph/loss.py (98%)
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/__init__.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/cell/__init__.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/cell/activation.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/cell/basic_block.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/cell/convolution.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/__init__.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/cell/embedding.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/cell/message_passing.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/graph/__init__.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/graph/dataloader.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/graph/normlization.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/utils/__init__.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/utils/check_func.py
 delete mode 100644 MindChem/applications/deephe3nn/mindchemistry/utils/load_config.py
 rename MindChem/applications/deephe3nn/{mindchemistry/cell/deephe3nn => models}/e3modules.py (99%)
 rename MindChem/applications/deephe3nn/{mindchemistry/cell/deephe3nn => models}/model.py (98%)
 rename MindChem/applications/deephe3nn/{mindchemistry/cell/deephe3nn => models}/utils.py (88%)
 create mode 100644 mindscience/e3nn/README.md
 rename docs/architecture/mindscience/e3nn.md => mindscience/e3nn/README_en.md (81%)
 rename {MindChem/applications/deephe3nn/mindchemistry => mindscience/e3nn}/so2_conv/__init__.py (94%)
 rename {MindChem/applications/deephe3nn/mindchemistry => mindscience/e3nn}/so2_conv/init_edge_rot_mat.py (74%)
 rename {MindChem/applications/deephe3nn/mindchemistry => mindscience/e3nn}/so2_conv/jd.pkl (100%)
 rename {MindChem/applications/deephe3nn/mindchemistry => mindscience/e3nn}/so2_conv/so2.py (100%)
 rename {MindChem/applications/deephe3nn/mindchemistry => mindscience/e3nn}/so2_conv/so3.py (100%)
 rename {MindChem/applications/deephe3nn/mindchemistry => mindscience/e3nn}/so2_conv/wigner.py (77%)

diff --git a/.jenkins/check/config/whitelizard.txt b/.jenkins/check/config/whitelizard.txt
index dd435a60c..939024f87 100644
--- a/.jenkins/check/config/whitelizard.txt
+++ b/.jenkins/check/config/whitelizard.txt
@@ -29,5 +29,5 @@ mindscience/MindSPONGE/applications/rf_diffusion/rfdiffusion/inference/ab_util.p
 #MindChem
 mindscience/MindChem/applications/deephe3nn/data/graph.py:get_graph
-mindscience/MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/model.py:__init__
+mindscience/MindChem/applications/deephe3nn/models/model.py:__init__
 mindscience/MindChem/applications/crystalflow/test_crystalflow.py:test_flow
diff --git a/MindChem/README_CN.md b/MindChem/README_CN.md
index 96d610b97..adafaa6e3 100644
--- a/MindChem/README_CN.md
+++ b/MindChem/README_CN.md
@@ -38,8 +38,8 @@

 ## 最新消息

-- `2025.04.16` 增加CrystalFlow模型支持;
 - `2025.07.07` 增加Orb模型支持;
+- `2025.04.16` 增加CrystalFlow模型支持;
 - `2025.03.30` MindChemistry 0.2.0版本发布,包括多个应用案例,支持NequIP、DeephE3nn、Matformer以及DiffCSP模型;
 - `2024.07.30` MindChemistry 0.1.0版本发布;

@@ -56,13 +56,13 @@
 | 模型 | 体系 | 数据 | 任务 |
 |---------|------|------|------|
 | [NequIP](./applications/nequip/) | 小分子 | Revised Molecular Dynamics 17 (rMD17) 数据集 | 分子能量预测,基于等变计算与图神经网络 |
-| [Orb](https://gitee.com/mindspore/mindscience/tree/master/MindChemistry/applications/orb) | 分子与晶体材料体系 | 大规模三维原子结构数据集,DFT 计算结果 | 通用图神经网络势,预测能量、力、应力,用于分子动力学模拟等 |
+| [Orb](./applications/orb/) | 分子与晶体材料体系 | 大规模三维原子结构数据集,DFT 计算结果 | 通用图神经网络势,预测能量、力、应力,用于分子动力学模拟等 |

 ### 性质预测

 | 模型 | 体系 | 数据 | 任务 |
 |---------|------|------|------|
-| [DeephE3nn](https://gitee.com/mindspore/mindscience/tree/master/MindChemistry/applications/deephe3nn) | 材料体系 | 双层石墨烯数据集 | 基于 E(3)-等变神经网络预测电子哈密顿量 |
+| [DeephE3nn](./applications/deephe3nn/) | 材料体系 | 双层石墨烯数据集 | 基于 E(3)-等变神经网络预测电子哈密顿量 |
 | [Matformer](./applications/matformer/) | 晶体材料体系 | JARVIS-DFT 3D数据集 | 基于图神经网络 + Transformer 预测材料性质 |

 ### 结构生成
diff --git a/MindChem/applications/deephe3nn/README.md b/MindChem/applications/deephe3nn/README.md
index 3d3a81304..0481450b2 100644
--- a/MindChem/applications/deephe3nn/README.md
+++ b/MindChem/applications/deephe3nn/README.md
@@ -8,11 +8,11 @@

 ## 数据集

-> 从https://zenodo.org/records/7553640 下载 Bilayer_graphene_dataset.zip 到当前目录并解压, 不要修改其文件名。
+> 从https://zenodo.org/records/7553640 下载 Bilayer_graphene_dataset.zip 到当前目录并解压,不要修改其文件名。

 ## 环境要求

-> 1. 安装`mindspore`
+> 1. 安装`mindspore>=2.7.1`
 > 2. 安装`mindscience`
 > 2. 安装依赖包`pip install -r requirements.txt`

@@ -30,17 +30,23 @@

 ```txt
 deephe3nn
-│  README.md            README文件
-│  train.py             训练启动脚本
-│  predictor.py         推理启动脚本
+│  README.md            README文件
+│  train.py             训练启动脚本
+│  predict.py           推理启动脚本
 │
 └─data
-        data.py         数据集处理
-        graph.py        图数据结构
+  │  data.py            数据集处理
+  │  graph.py           图数据结构
 │
 └─models
-        kernel.py           主执行流程
-        parse_configs.py    config处理文件
+  │  kernel.py           主执行流程
+  │  parse_configs.py    config处理文件
+  │  e3modules.py        e3模块
+  │  model.py            模型定义
+  │  utils.py            工具函数
+└─graph
+  │  graph.py            图相关操作
+  │  loss.py             图损失函数
 └─configs
         Bilayer_graphene_train_numpy.ini   模型config文件
 ```
@@ -49,7 +55,7 @@ deephe3nn

 ### 训练

-```txt
+```bash
 pip install -r requirements.txt
 python train.py configs/Bilayer_graphene_train.ini
 ```
@@ -58,7 +64,7 @@ python train.py configs/Bilayer_graphene_train.ini

 将权重的path写入config文件的checkpoint_dir中

-```txt
+```bash
 pip install -r requirements.txt
 python predict.py configs/Bilayer_graphene_train.ini
 ```
diff --git a/MindChem/applications/deephe3nn/README_EN.md b/MindChem/applications/deephe3nn/README_EN.md
index 8b76e36b5..dfa467092 100644
--- a/MindChem/applications/deephe3nn/README_EN.md
+++ b/MindChem/applications/deephe3nn/README_EN.md
@@ -12,7 +12,7 @@ Download `Bilayer_graphene_dataset.zip` from https://zenodo.org/records/7553640

 ## Environment Requirements

-1. Install `mindspore`
+1. Install `mindspore>=2.7.1`
 2. Install `mindscience`
 3. Install dependencies: `pip install -r requirements.txt`

@@ -30,20 +30,28 @@ Download `Bilayer_graphene_dataset.zip` from https://zenodo.org/records/7553640

 ```txt
 deephe3nn
-│  README.md            README (Chinese)
-│  README_EN.md         README (English)
-│  train.py             Training entry
-│  predictor.py         Inference entry
+│  README.md            README (Chinese)
+│  README_EN.md         README (English)
+│  train.py             Training entry
+│  predict.py           Inference entry
+│  requirements.txt     Python dependencies
 │
 └─data
-        data.py         Dataset processing
-        graph.py        Graph data structures
+  │  data.py            Dataset processing
+  │  graph.py           Graph data structures
 │
 └─models
-        kernel.py           Main execution flow
-        parse_configs.py    Config processing
+  │  kernel.py           Main execution flow
+  │  parse_configs.py    Config processing
+  │  e3modules.py        E3 modules
+  │  model.py            Model definition
+  │  utils.py            Utility functions
+└─graph
+  │  graph.py            Graph operations
+  │  loss.py             Graph loss functions
+  │
 └─configs
-        Bilayer_graphene_train_numpy.ini   Model config file
+        Bilayer_graphene_train.ini         Model config file
 ```

 ## Training and Inference
diff --git a/MindChem/applications/deephe3nn/data/data.py b/MindChem/applications/deephe3nn/data/data.py
index 3c3cf5b27..367cda193 100644
--- a/MindChem/applications/deephe3nn/data/data.py
+++ b/MindChem/applications/deephe3nn/data/data.py
@@ -27,7 +27,7 @@
 from pathos.multiprocessing import ProcessingPool as Pool
 from pymatgen.core.structure import Structure
 from data.graph import get_graph, load_orbital_types
-from mindchemistry.cell.deephe3nn.utils import process_targets
+from models.utils import process_targets


 class AijData:
diff --git a/MindChem/applications/deephe3nn/data/graph.py b/MindChem/applications/deephe3nn/data/graph.py
index 5c183cf20..9957a9742 100644
--- a/MindChem/applications/deephe3nn/data/graph.py
+++ b/MindChem/applications/deephe3nn/data/graph.py
@@ -25,7 +25,7 @@
 import warnings
 import h5py
 import numpy as np
-from mindchemistry.cell.deephe3nn.utils import convert2numpyt, flt2cplx
+from models.utils import convert2numpyt, flt2cplx


 def load_orbital_types(path, return_orbital_types=False):
@@ -65,15 +65,6 @@ def is_ij(edge_key):
         out = False
     return out

-
-def convert_ijji(edge_key):
r"""convert edge key between ij and ji""" - if isinstance(edge_key, str): - edge_key = eval(edge_key) - out = [-edge_key[0], -edge_key[1], -edge_key[2], edge_key[4], edge_key[3]] - return out - - def get_edge_fea(cart_coords, lattice, default_dtype_np, edge_key): """ get edge feature diff --git a/MindChem/applications/deephe3nn/mindchemistry/graph/graph.py b/MindChem/applications/deephe3nn/graph/graph.py similarity index 83% rename from MindChem/applications/deephe3nn/mindchemistry/graph/graph.py rename to MindChem/applications/deephe3nn/graph/graph.py index 1f13b8869..ffd992340 100644 --- a/MindChem/applications/deephe3nn/mindchemistry/graph/graph.py +++ b/MindChem/applications/deephe3nn/graph/graph.py @@ -14,7 +14,7 @@ # ============================================================================ """graph""" import mindspore as ms -from mindspore import ops, nn +from mindspore import nn, ops def degree(index, dim_size, mask=None): @@ -171,28 +171,6 @@ class AggregateEdgeToGlobal(Aggregate): return self.scatter(edge_attr, batch_edge, out=out, dim_size=dim_size, mask=mask) -class AggregateEdgeToNode(Aggregate): - """AggregateEdgeToNode""" - - def __init__(self, mode='add', dim=0): - super().__init__(mode=mode) - self.dim = dim - - def construct(self, edge_attr, edge_index, out=None, dim_size=None, mask=None): - r""" - Args: - edge_attr (Tensor): The source tensor of edge attributes. - edge_index (Tensor): The indices of nodes in each edge. - out (Tensor): The destination tensor. Default: None. - dim_size (int): If `out` is not given, automatically create output with size `dim_size`. Default: None. - out and dim_size cannot be both None. - mask (Tensor): The mask of the node_attr tensor - Returns: - Tensor. - """ - return self.scatter(edge_attr, edge_index[self.dim], out=out, dim_size=dim_size, mask=mask) - - class Lift(nn.Cell): """Lift""" @@ -253,28 +231,6 @@ class LiftGlobalToNode(Lift): return self.repeat(global_attr, num_node, max_len=max_len) -class LiftGlobalToEdge(Lift): - """LiftGlobalToEdge""" - - def __init__(self, mode="multi_graph"): - super().__init__(mode=mode) - - def construct(self, global_attr, batch_edge=None, num_edge=None, mask=None, max_len=None): - r""" - Args: - global_attr (Tensor): The source tensor of global attributes. - batch_edge (Tensor): The indices of samples to get. - num_edge (Int): The number of edge in the graph, when there is only 1 graph. - mask (Tensor): The mask of the output tensor. - max_len (Int): The output length. - Returns: - Tensor. 
- """ - if global_attr.shape[0] > 1 or self.mode == "multi_graph": - return self.lift(global_attr, batch_edge, mask=mask) - return self.repeat(global_attr, num_edge, max_len=max_len) - - class LiftNodeToEdge(Lift): """LiftNodeToEdge""" diff --git a/MindChem/applications/deephe3nn/mindchemistry/graph/loss.py b/MindChem/applications/deephe3nn/graph/loss.py similarity index 98% rename from MindChem/applications/deephe3nn/mindchemistry/graph/loss.py rename to MindChem/applications/deephe3nn/graph/loss.py index 28e241e6f..ea39b8278 100644 --- a/MindChem/applications/deephe3nn/mindchemistry/graph/loss.py +++ b/MindChem/applications/deephe3nn/graph/loss.py @@ -14,7 +14,7 @@ # ============================================================================ """loss""" import mindspore as ms -from mindspore import ops, nn +from mindspore import nn, ops class LossMaskBase(nn.Cell): diff --git a/MindChem/applications/deephe3nn/mindchemistry/__init__.py b/MindChem/applications/deephe3nn/mindchemistry/__init__.py deleted file mode 100644 index c54166ca1..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Initialization for MindChemistry APIs.""" - -import time -import mindspore as ms -from mindspore import log as logger -from mindscience.e3nn import * -from .cell import * -from .utils import * -from .graph import * -from .so2_conv import * - -__all__ = [] -__all__.extend(cell.__all__) -__all__.extend(utils.__all__) - -def _mindspore_version_check(): - """ - Check MindSpore version for MindChemistry. - - Raises: - ImportError: If MindSpore cannot be imported. - """ - try: - _ = ms.__version__ - except ImportError as exc: - raise ImportError( - "Cannot find MindSpore in the current environment. Please install " - "MindSpore before using MindChemistry, by following the instruction at " - "https://www.mindspore.cn/install" - ) from exc - - ms_version = ms.__version__[:5] - required_mindspore_version = "1.8.1" - - if ms_version < required_mindspore_version: - logger.warning( - f"Current version of MindSpore ({ms_version}) is not compatible with MindChemistry. " - f"Some functions might not work or even raise errors. Please install MindSpore " - f"version >= {required_mindspore_version}. 
For more details about dependency settings, " - f"please check the instructions at the MindSpore official website " - f"https://www.mindspore.cn/install or check the README.md at " - f"https://gitee.com/mindspore/mindscience" - ) - - for i in range(3, 0, -1): - logger.warning(f"Please pay attention to the above warning, countdown: {i}") - time.sleep(1) - - -_mindspore_version_check() diff --git a/MindChem/applications/deephe3nn/mindchemistry/cell/__init__.py b/MindChem/applications/deephe3nn/mindchemistry/cell/__init__.py deleted file mode 100644 index e1398d8af..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/cell/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""initialization for cells""" -from .allegro import * -from .basic_block import AutoEncoder, FCNet, MLPNet -from .cspnet import CSPNet -from .deephe3nn import * -from .dimenet import * -from .gemnet import * -from .matformer import * -from .nequip import Nequip -from .orb import * - -__all__ = ["Nequip", "AutoEncoder", "FCNet", "MLPNet", "CSPNet"] -__all__.extend(deephe3nn.__all__) -__all__.extend(matformer.__all__) -__all__.extend(allegro.__all__) -__all__.extend(dimenet.__all__) -__all__.extend(gemnet.__all__) -__all__.extend(orb.__all__) diff --git a/MindChem/applications/deephe3nn/mindchemistry/cell/activation.py b/MindChem/applications/deephe3nn/mindchemistry/cell/activation.py deleted file mode 100644 index d09b35831..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/cell/activation.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2024 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""get activation function.""" -from __future__ import absolute_import - -from mindspore import ops -from mindspore.nn.layer import activation - -_activation = { - 'softmax': activation.Softmax, - 'logsoftmax': activation.LogSoftmax, - 'relu': activation.ReLU, - 'silu': activation.SiLU, - 'relu6': activation.ReLU6, - 'tanh': activation.Tanh, - 'gelu': activation.GELU, - 'fast_gelu': activation.FastGelu, - 'elu': activation.ELU, - 'sigmoid': activation.Sigmoid, - 'prelu': activation.PReLU, - 'leakyrelu': activation.LeakyReLU, - 'hswish': activation.HSwish, - 'hsigmoid': activation.HSigmoid, - 'logsigmoid': activation.LogSigmoid, - 'sin': ops.Sin -} diff --git a/MindChem/applications/deephe3nn/mindchemistry/cell/basic_block.py b/MindChem/applications/deephe3nn/mindchemistry/cell/basic_block.py deleted file mode 100644 index 6a83f67e0..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/cell/basic_block.py +++ /dev/null @@ -1,600 +0,0 @@ -# Copyright 2023 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""basic""" -from __future__ import absolute_import - -from collections.abc import Sequence -from typing import Union - -from mindspore import nn -from mindspore.nn.layer import activation -from mindspore import ops, float16, float32, Tensor -from mindspore.common.initializer import Initializer - -from .activation import _activation - - -def _get_dropout(dropout_rate): - """ - Gets the dropout functions. - - Inputs: - dropout_rate (Union[int, float]): The dropout rate of the dropout function. - If dropout_rate was int or not in range (0,1], it would be rectify to closest float value. - - Returns: - Function, the dropout function. - - Supported Platforms: - ``Ascend`` ``GPU`` - - Examples: - >>> import numpy as np - >>> from mindchemistry.cell import _get_dropout - >>> dropout = get_dropout(0.5) - >>> dropout.set_train - Dropout - """ - dropout_rate = float(max(min(dropout_rate, 1.), 1e-7)) - return nn.Dropout(keep_prob=dropout_rate) - - -def _get_layernorm(channel, epsilon): - """ - Gets the layer normalization functions. - - Inputs: - channel (Union[int, list]): The normalized shape of the layer normalization function. - If channel was int, it would be wrap into a list. - epsilon (float): The epsilon of the layer normalization function. - - Returns: - Function, the layer normalization function. 
- - Supported Platforms: - ``Ascend`` ``GPU`` - - Examples: - >>> import numpy as np - >>> from mindchemistry.cell import _get_layernorm - >>> from mindspore import Tensor - >>> input_x = Tensor(np.array([[1.2, 0.1], [0.2, 3.2]], dtype=np.float32)) - >>> layernorm = get_layernorm([2], 1e-7) - >>> output = layernorm(input_x) - >>> print(output) - [[ 9.99999881e-01, -9.99999881e-01], - [-1.00000000e+00, 1.00000000e+00]] - """ - if isinstance(channel, int): - channel = [channel] - return nn.LayerNorm(channel, epsilon=epsilon) - - -def _get_activation(name): - """ - Gets the activation function. - - Inputs: - name (Union[str, None]): The name of the activation function. If name was None, it would return []. - - Returns: - Function, the activation function. - - Supported Platforms: - ``Ascend`` ``GPU`` - - Examples: - >>> import numpy as np - >>> from mindchemistry.cell import _get_activation - >>> from mindspore import Tensor - >>> input_x = Tensor(np.array([[1.2, 0.1], [0.2, 3.2]], dtype=np.float32)) - >>> sigmoid = _get_activation('sigmoid') - >>> output = sigmoid(input_x) - >>> print(output) - [[0.7685248 0.5249792 ] - [0.54983395 0.96083426]] - """ - if name is None: - return [] - if isinstance(name, str): - name = name.lower() - if name not in _activation: - return activation.get_activation(name) - return _activation.get(name)() - return name - - -def _get_layer_arg(arguments, index): - """ - Gets the argument of each network layers. - - Inputs: - arguments (Union[str, int, float, List, None]): The arguments of each layers. - If arguments was List return the argument at the index of the List. - index (int): The index of layer in the network - - Returns: - Argument of the indexed layer. - - Supported Platforms: - ``Ascend`` ``GPU`` - - Examples: - >>> import numpy as np - >>> from mindchemistry.cell import _get_layer_arg - >>> from mindspore import Tensor - >>> dropout_rate = _get_layer_arg([0.1, 0.2, 0.3], index=2) - >>> print(dropout_rate) - 0.2 - >>> dropout_rate = _get_layer_arg(0.2, index=2) - >>> print(dropout_rate) - 0.2 - """ - if isinstance(arguments, list): - if len(arguments) <= index: - if len(arguments) == 1: - return [] if arguments[0] is None else arguments[0] - return [] - return [] if arguments[index] is None else arguments[index] - return [] if arguments is None else arguments - - -def get_linear_block( - in_channels, - out_channels, - weight_init='normal', - has_bias=True, - bias_init='zeros', - has_dropout=False, - dropout_rate=0.5, - has_layernorm=False, - layernorm_epsilon=1e-7, - has_activation=True, - act='relu' -): - """ - Gets the linear block list. - - Inputs: - in_channels (int): The number of input channel. - out_channels (int): The number of output channel. - weight_init (Union[str, float, mindspore.common.initializer]): The initializer of the weights of dense layer - has_bias (bool): The switch for whether dense layer has bias. - bias_init (Union[str, float, mindspore.common.initializer]): The initializer of the bias of dense layer - has_dropout (bool): The switch for whether linear block has a dropout layer. - dropout_rate (float): The dropout rate for dropout layer, the dropout rate must be a float in range (0, 1] - has_layernorm (bool): The switch for whether linear block has a layer normalization layer. - layernorm_epsilon (float): The hyper parameter epsilon for layer normalization layer. - has_activation (bool): The switch for whether linear block has an activation layer. 
- act (Union[str, None]): The activation function in linear block - - Returns: - List of mindspore.nn.Cell, linear block list . - - Supported Platforms: - ``Ascend`` ``GPU`` - - Examples: - >>> import numpy as np - >>> from mindchemistry.cell import get_layer_arg - >>> from mindspore import Tensor - >>> dropout_rate = get_layer_arg([0.1, 0.2, 0.3], index=2) - >>> print(dropout_rate) - 0.2 - >>> dropout_rate = get_layer_arg(0.2, index=2) - >>> print(dropout_rate) - 0.2 - """ - dense = nn.Dense( - in_channels, out_channels, weight_init=weight_init, bias_init=bias_init, has_bias=has_bias, activation=None - ) - dropout = _get_dropout(dropout_rate) if (has_dropout is True) else [] - layernorm = _get_layernorm(out_channels, layernorm_epsilon) if (has_layernorm is True) else [] - act = _get_activation(act) if (has_activation is True) else [] - block_list = [dense, dropout, layernorm, act] - while [] in block_list: - block_list.remove([]) - return block_list - - -class FCNet(nn.Cell): - r""" - The Fully Connected Network. Applies a series of fully connected layers to the incoming data. - - Args: - channels (List): the list of numbers of channel of each fully connected layers. - weight_init (Union[str, float, mindspore.common.initializer, List]): initialize layer weights. - If weight_init was List, each element corresponds to each layer. Default: ``'normal'`` . - has_bias (Union[bool, List]): The switch for whether the dense layers has bias. - If has_bias was List, each element corresponds to each dense layer. Default: ``True`` . - bias_init (Union[str, float, mindspore.common.initializer, List]): The initializer of the bias of dense - layer. If bias_init was List, each element corresponds to each dense layer. Default: ``'zeros'`` . - has_dropout (Union[bool, List]): The switch for whether linear block has a dropout layer. - If has_dropout was List, each element corresponds to each layer. Default: ``False`` . - dropout_rate (float): The dropout rate for dropout layer, the dropout rate must be a float in range (0, 1] - If dropout_rate was List, each element corresponds to each dropout layer. Default: ``0.5`` . - has_layernorm (Union[bool, List]): The switch for whether linear block has a layer normalization layer. - If has_layernorm was List, each element corresponds to each layer. Default: ``False`` . - layernorm_epsilon (float): The hyper parameter epsilon for layer normalization layer. - If layernorm_epsilon was List, each element corresponds to each layer normalization layer. - Default: ``1e-7`` . - has_activation (Union[bool, List]): The switch for whether linear block has an activation layer. - If has_activation was List, each element corresponds to each layer. Default: ``True`` . - act (Union[str, None, List]): The activation function in linear block. - If act was List, each element corresponds to each activation layer. Default: ``'relu'`` . - - Inputs: - - **input** (Tensor) - The shape of Tensor is :math:`(*, channels[0])`. - - Outputs: - - **output** (Tensor) - The shape of Tensor is :math:`(*, channels[-1])`. 
- - Supported Platforms: - ``Ascend`` - - Examples: - >>> import numpy as np - >>> from mindchemistry.cell import FCNet - >>> from mindspore import Tensor - >>> inputs = Tensor(np.array([[180, 234, 154], [244, 48, 247]], np.float32)) - >>> net = FCNet([3, 16, 32, 16, 8]) - >>> output = net(inputs) - >>> print(output.shape) - (2, 8) - - """ - - def __init__( - self, - channels, - weight_init='normal', - has_bias=True, - bias_init='zeros', - has_dropout=False, - dropout_rate=0.5, - has_layernorm=False, - layernorm_epsilon=1e-7, - has_activation=True, - act='relu' - ): - super().__init__() - self.channels = channels - self.weight_init = weight_init - self.has_bias = has_bias - self.bias_init = bias_init - self.has_dropout = has_dropout - self.dropout_rate = dropout_rate - self.has_layernorm = has_layernorm - self.layernorm_epsilon = layernorm_epsilon - self.has_activation = has_activation - self.activation = act - self.network = nn.SequentialCell(self._create_network()) - - def _create_network(self): - """ create the network """ - cell_list = [] - for i in range(len(self.channels) - 1): - cell_list += get_linear_block( - self.channels[i], - self.channels[i + 1], - weight_init=_get_layer_arg(self.weight_init, i), - has_bias=_get_layer_arg(self.has_bias, i), - bias_init=_get_layer_arg(self.bias_init, i), - has_dropout=_get_layer_arg(self.has_dropout, i), - dropout_rate=_get_layer_arg(self.dropout_rate, i), - has_layernorm=_get_layer_arg(self.has_layernorm, i), - layernorm_epsilon=_get_layer_arg(self.layernorm_epsilon, i), - has_activation=_get_layer_arg(self.has_activation, i), - act=_get_layer_arg(self.activation, i) - ) - return cell_list - - def construct(self, x): - return self.network(x) - - -class MLPNet(nn.Cell): - r""" - The MLPNet Network. Applies a series of fully connected layers to the incoming data among which hidden layers have - same number of channels. - - Args: - in_channels (int): the number of input layer channel. - out_channels (int): the number of output layer channel. - layers (int): the number of layers. - neurons (int): the number of channels of hidden layers. - weight_init (Union[str, float, mindspore.common.initializer, List]): initialize layer weights. - If weight_init was List, each element corresponds to each layer. Default: ``'normal'`` . - has_bias (Union[bool, List]): The switch for whether the dense layers has bias. - If has_bias was List, each element corresponds to each dense layer. Default: ``True`` . - bias_init (Union[str, float, mindspore.common.initializer, List]): The initializer of the bias of dense - layer. If bias_init was List, each element corresponds to each dense layer. Default: ``'zeros'`` . - has_dropout (Union[bool, List]): The switch for whether linear block has a dropout layer. - If has_dropout was List, each element corresponds to each layer. Default: ``False`` . - dropout_rate (float): The dropout rate for dropout layer, the dropout rate must be a float in range (0, 1] . - If dropout_rate was List, each element corresponds to each dropout layer. Default: ``0.5`` . - has_layernorm (Union[bool, List]): The switch for whether linear block has a layer normalization layer. - If has_layernorm was List, each element corresponds to each layer. Default: ``False`` . - layernorm_epsilon (float): The hyper parameter epsilon for layer normalization layer. - If layernorm_epsilon was List, each element corresponds to each layer normalization layer. - Default: ``1e-7`` . 
- has_activation (Union[bool, List]): The switch for whether linear block has an activation layer. - If has_activation was List, each element corresponds to each layer. Default: ``True`` . - act (Union[str, None, List]): The activation function in linear block. - If act was List, each element corresponds to each activation layer. Default: ``'relu'`` . - - Inputs: - - **input** (Tensor) - The shape of Tensor is :math:`(*, channels[0])`. - - Outputs: - - **output** (Tensor) - The shape of Tensor is :math:`(*, channels[-1])`. - - Supported Platforms: - ``Ascend`` - - Examples: - >>> import numpy as np - >>> from mindchemistry.cell import FCNet - >>> from mindspore import Tensor - >>> inputs = Tensor(np.array([[180, 234, 154], [244, 48, 247]], np.float32)) - >>> net = MLPNet(in_channels=3, out_channels=8, layers=5, neurons=32) - >>> output = net(inputs) - >>> print(output.shape) - (2, 8) - - """ - - def __init__( - self, - in_channels, - out_channels, - layers, - neurons, - weight_init='normal', - has_bias=True, - bias_init='zeros', - has_dropout=False, - dropout_rate=0.5, - has_layernorm=False, - layernorm_epsilon=1e-7, - has_activation=True, - act='relu' - ): - super().__init__() - self.channels = (in_channels,) + (layers - 2) * \ - (neurons,) + (out_channels,) - self.network = FCNet( - channels=self.channels, - weight_init=weight_init, - has_bias=has_bias, - bias_init=bias_init, - has_dropout=has_dropout, - dropout_rate=dropout_rate, - has_layernorm=has_layernorm, - layernorm_epsilon=layernorm_epsilon, - has_activation=has_activation, - act=act - ) - - def construct(self, x): - return self.network(x) - - -class MLPMixPrecision(nn.Cell): - """MLPMixPrecision - """ - - def __init__( - self, - input_dim: int, - hidden_dims: Sequence, - short_cut=False, - batch_norm=False, - activation_fn='relu', - has_bias=False, - weight_init: Union[Initializer, str] = 'xavier_uniform', - bias_init: Union[Initializer, str] = 'zeros', - dropout=0, - dtype=float32 - ): - super().__init__() - self.dtype = dtype - self.div = ops.Div() - - self.dims = [input_dim] + hidden_dims - self.short_cut = short_cut - self.nonlinear_const = 1.0 - if isinstance(activation_fn, str): - self.activation = _activation.get(activation_fn)() - if activation_fn is not None and activation_fn == 'silu': - self.nonlinear_const = 1.679177 - else: - self.activation = activation_fn - self.dropout = None - if dropout: - self.dropout = nn.Dropout(dropout) - fcs = [ - nn.Dense(dim, self.dims[i + 1], weight_init=weight_init, bias_init=bias_init, - has_bias=has_bias).to_float(self.dtype) for i, dim in enumerate(self.dims[:-1]) - ] - self.layers = nn.CellList(fcs) - self.batch_norms = None - if batch_norm: - bns = [nn.BatchNorm1d(dim) for dim in self.dims[1:-1]] - self.batch_norms = nn.CellList(bns) - - def construct(self, inputs): - """construct - - Args: - inputs: inputs - - Returns: - inputs - """ - hidden = inputs - norm_from_last = 1.0 - for i, layer in enumerate(self.layers): - sqrt_dim = ops.sqrt(Tensor(float(self.dims[i]))) - layer_hidden = layer(hidden) - if self.dtype == float16: - layer_hidden = layer_hidden.astype(float16) - hidden = self.div(layer_hidden * norm_from_last, sqrt_dim) - norm_from_last = self.nonlinear_const - if i < len(self.layers) - 1: - if self.batch_norms is not None: - x = hidden.flatten(0, -2) - hidden = self.batch_norms[i](x).view_as(hidden) - if self.activation is not None: - hidden = self.activation(hidden) - if self.dropout is not None: - hidden = self.dropout(hidden) - if self.short_cut and hidden.shape == 
hidden.shape: - hidden += inputs - return hidden - - -class AutoEncoder(nn.Cell): - r""" - The AutoEncoder Network. - Applies an encoder to get the latent code and applies a decoder to get the reconstruct data. - - Args: - channels (list): The number of channels of each encoder and decoder layer. - weight_init (Union[str, float, mindspore.common.initializer, List]): initialize layer parameters. - If weight_init was List, each element corresponds to each layer. Default: ``'normal'`` . - has_bias (Union[bool, List]): The switch for whether the dense layers has bias. - If has_bias was List, each element corresponds to each dense layer. Default: ``True`` . - bias_init (Union[str, float, mindspore.common.initializer, List]): initialize layer parameters. - If bias_init was List, each element corresponds to each dense layer. Default: ``'zeros'`` . - has_dropout (Union[bool, List]): The switch for whether linear block has a dropout layer. - If has_dropout was List, each element corresponds to each layer. Default: ``False`` . - dropout_rate (float): The dropout rate for dropout layer, the dropout rate must be a float in range (0, 1] - If dropout_rate was List, each element corresponds to each dropout layer. Default: ``0.5`` . - has_layernorm (Union[bool, List]): The switch for whether linear block has a layer normalization layer. - If has_layernorm was List, each element corresponds to each layer. Default: ``False`` . - layernorm_epsilon (float): The hyper parameter epsilon for layer normalization layer. - If layernorm_epsilon was List, each element corresponds to each layer normalization layer. - Default: ``1e-7`` . - has_activation (Union[bool, List]): The switch for whether linear block has an activation layer. - If has_activation was List, each element corresponds to each layer. Default: ``True`` . - act (Union[str, None, List]): The activation function in linear block. - If act was List, each element corresponds to each activation layer. Default: ``'relu'`` . - out_act (Union[None, str, mindspore.nn.Cell]): The activation function to output layer. Default: ``None`` . - - Inputs: - - **x** (Tensor) - The shape of Tensor is :math:`(*, channels[0])`. - - Outputs: - - **latents** (Tensor) - The shape of Tensor is :math:`(*, channels[-1])`. - - **x_recon** (Tensor) - The shape of Tensor is :math:`(*, channels[0])`. 
- - Supported Platforms: - ``Ascend`` - - Examples: - >>> import numpy as np - >>> from mindchemistry import AutoEncoder - >>> from mindspore import Tensor - >>> inputs = Tensor(np.array([[180, 234, 154], [244, 48, 247]], np.float32)) - >>> net = AutoEncoder([3, 6, 2]) - >>> output = net(inputs) - >>> print(output[0].shape, output[1].shape) - (2, 2) (2, 3) - - """ - - def __init__( - self, - channels, - weight_init='normal', - has_bias=True, - bias_init='zeros', - has_dropout=False, - dropout_rate=0.5, - has_layernorm=False, - layernorm_epsilon=1e-7, - has_activation=True, - act='relu', - out_act=None - ): - super().__init__() - self.channels = channels - self.weight_init = weight_init - self.bias_init = bias_init - self.has_bias = has_bias - self.has_dropout = has_dropout - self.dropout_rate = dropout_rate - self.has_layernorm = has_layernorm - self.has_activation = has_activation - self.layernorm_epsilon = layernorm_epsilon - self.activation = act - self.output_activation = out_act - self.encoder = nn.SequentialCell(self._create_encoder()) - self.decoder = nn.SequentialCell(self._create_decoder()) - - def _create_encoder(self): - """ create the network encoder """ - encoder_cell_list = [] - for i in range(len(self.channels) - 1): - encoder_cell_list += get_linear_block( - self.channels[i], - self.channels[i + 1], - weight_init=_get_layer_arg(self.weight_init, i), - has_bias=_get_layer_arg(self.has_bias, i), - bias_init=_get_layer_arg(self.bias_init, i), - has_dropout=_get_layer_arg(self.has_dropout, i), - dropout_rate=_get_layer_arg(self.dropout_rate, i), - has_layernorm=_get_layer_arg(self.has_layernorm, i), - layernorm_epsilon=_get_layer_arg(self.layernorm_epsilon, i), - has_activation=_get_layer_arg(self.has_activation, i), - act=_get_layer_arg(self.activation, i) - ) - return encoder_cell_list - - def _create_decoder(self): - """ create the network decoder """ - decoder_channels = self.channels[::-1] - decoder_weight_init = self.weight_init[::-1] if isinstance(self.weight_init, list) else self.weight_init - decoder_bias_init = self.bias_init[::-1] if isinstance(self.bias_init, list) else self.bias_init - decoder_cell_list = [] - for i in range(len(decoder_channels) - 1): - decoder_cell_list += get_linear_block( - decoder_channels[i], - decoder_channels[i + 1], - weight_init=_get_layer_arg(decoder_weight_init, i), - has_bias=_get_layer_arg(self.has_bias, i), - bias_init=_get_layer_arg(decoder_bias_init, i), - has_dropout=_get_layer_arg(self.has_dropout, i), - dropout_rate=_get_layer_arg(self.dropout_rate, i), - has_layernorm=_get_layer_arg(self.has_layernorm, i), - layernorm_epsilon=_get_layer_arg(self.layernorm_epsilon, i), - has_activation=_get_layer_arg(self.has_activation, i), - act=_get_layer_arg(self.activation, i) - ) - if self.output_activation is not None: - decoder_cell_list.append(_get_activation(self.output_activation)) - return decoder_cell_list - - def encode(self, x): - return self.encoder(x) - - def decode(self, z): - return self.decoder(z) - - def construct(self, x): - latents = self.encode(x) - x_recon = self.decode(latents) - return x_recon, latents diff --git a/MindChem/applications/deephe3nn/mindchemistry/cell/convolution.py b/MindChem/applications/deephe3nn/mindchemistry/cell/convolution.py deleted file mode 100644 index 1fa168da2..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/cell/convolution.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# 
you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""convolution""" -from mindspore import nn, ops, float32 -from mindscience.e3nn.o3 import TensorProduct, Irreps, Linear -from mindscience.e3nn.nn import FullyConnectedNet -from ..graph.graph import AggregateEdgeToNode - -softplus = ops.Softplus() - - -def shift_softplus(x): - return softplus(x) - 0.6931471805599453 - - -def silu(x): - return x * ops.sigmoid(x) - - -class Convolution(nn.Cell): - r""" - InteractionBlock. - - Args: - irreps_node_input: Input Features, default = None - irreps_node_attr: Nodes attribute irreps - irreps_node_output: Output irreps, in our case typically a single scalar - irreps_edge_attr: Edge attribute irreps - invariant_layers: Number of invariant layers, default = 1 - invariant_neurons: Number of hidden neurons in invariant function, default = 8 - avg_num_neighbors: Number of neighbors to divide by, default None => no normalization. - use_sc(bool): use self-connection or not - """ - - def __init__(self, - irreps_node_input, - irreps_node_attr, - irreps_node_output, - irreps_edge_attr, - irreps_edge_scalars, - invariant_layers=1, - invariant_neurons=8, - avg_num_neighbors=None, - use_sc=True, - nonlin_scalars=None, - dtype=float32, - ncon_dtype=float32): - super().__init__() - self.avg_num_neighbors = avg_num_neighbors - self.use_sc = use_sc - - self.irreps_node_input = Irreps(irreps_node_input) - self.irreps_node_attr = Irreps(irreps_node_attr) - self.irreps_node_output = Irreps(irreps_node_output) - self.irreps_edge_attr = Irreps(irreps_edge_attr) - self.irreps_edge_scalars = Irreps([(irreps_edge_scalars.num_irreps, (0, 1))]) - - self.lin1 = Linear(self.irreps_node_input, self.irreps_node_input, dtype=dtype, ncon_dtype=ncon_dtype) - - tp = TensorProduct(self.irreps_node_input, - self.irreps_edge_attr, - self.irreps_node_output, - 'merge', - weight_mode='custom', - dtype=dtype, - ncon_dtype=ncon_dtype) - - self.fc = FullyConnectedNet([self.irreps_edge_scalars.num_irreps] + invariant_layers * [invariant_neurons] + - [tp.weight_numel], { - "ssp": shift_softplus, - "silu": ops.silu, - }.get(nonlin_scalars.get("e", None), None), dtype=dtype) - - self.tp = tp - self.scatter = AggregateEdgeToNode(dim=1) - - self.lin2 = Linear(tp.irreps_out.simplify(), self.irreps_node_output, dtype=dtype, ncon_dtype=ncon_dtype) - - self.sc = None - if self.use_sc: - self.sc = TensorProduct(self.irreps_node_input, - self.irreps_node_attr, - self.irreps_node_output, - 'connect', - dtype=dtype, - ncon_dtype=ncon_dtype) - - def construct(self, node_input, node_attr, edge_src, edge_dst, edge_attr, edge_scalars): - """Evaluate interaction Block with resnet""" - weight = self.fc(edge_scalars) - - node_features = self.lin1(node_input) - - edge_features = self.tp(node_features[edge_src], edge_attr, weight) - - node_features = self.scatter(edge_attr=edge_features, edge_index=[edge_src, edge_dst], - dim_size=node_input.shape[0]) - - if self.avg_num_neighbors is not None: - node_features = node_features.div(self.avg_num_neighbors**0.5) - - 
node_features = self.lin2(node_features) - - if self.sc is not None: - sc = self.sc(node_input, node_attr) - node_features = node_features + sc - - return node_features diff --git a/MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/__init__.py b/MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/__init__.py deleted file mode 100644 index aa7b8cebb..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2024 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""initialization for deephe3nn""" - -from .e3modules import E3LayerNorm -from .model import Net - -__all__ = ["Net", "E3LayerNorm"] diff --git a/MindChem/applications/deephe3nn/mindchemistry/cell/embedding.py b/MindChem/applications/deephe3nn/mindchemistry/cell/embedding.py deleted file mode 100644 index 901cec030..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/cell/embedding.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ -"""MessagePassing""" -from mindspore import nn, ops, float32 - -from mindscience.e3nn.o3 import Irreps -from mindscience.e3nn.nn import Gate, NormActivation -from .convolution import Convolution, shift_softplus - -acts = { - "abs": ops.abs, - "tanh": ops.tanh, - "ssp": shift_softplus, - "silu": ops.silu, -} - - -class Compose(nn.Cell): - def __init__(self, first, second): - super().__init__() - self.first = first - self.second = second - - def construct(self, *inputs): - x = self.first(*inputs) - x = self.second(x) - return x - - -class MessagePassing(nn.Cell): - """MessagePassing""" - # pylint: disable=W0102 - def __init__( - self, - irreps_node_input, - irreps_node_attr, - irreps_node_hidden, - irreps_node_output, - irreps_edge_attr, - irreps_edge_scalars, - convolution_kwargs={}, - num_layers=3, - resnet=False, - nonlin_type="gate", - nonlin_scalars={"e": "ssp", "o": "tanh"}, - nonlin_gates={"e": "ssp", "o": "abs"}, - dtype=float32, - ncon_dtype=float32 - ): - super().__init__() - if nonlin_type not in ('gate', 'norm'): - raise ValueError(f"Unexpected nonlin_type {nonlin_type}.") - - nonlin_scalars = { - 1: nonlin_scalars["e"], - -1: nonlin_scalars["o"], - } - nonlin_gates = { - 1: nonlin_gates["e"], - -1: nonlin_gates["o"], - } - - self.irreps_node_input = Irreps(irreps_node_input) - self.irreps_node_hidden = Irreps(irreps_node_hidden) - self.irreps_node_output = Irreps(irreps_node_output) - self.irreps_node_attr = Irreps(irreps_node_attr) - self.irreps_edge_attr = Irreps(irreps_edge_attr) - self.irreps_edge_scalars = Irreps(irreps_edge_scalars) - - irreps_node = self.irreps_node_input - irreps_prev = irreps_node - self.layers = nn.CellList() - self.resnets = [] - - for _ in range(num_layers): - tmp_irreps = irreps_node * self.irreps_edge_attr - - irreps_scalars = Irreps( - [ - (mul, ir) - for mul, ir in self.irreps_node_hidden - if ir.l == 0 and ir in tmp_irreps - ] - ).simplify() - irreps_gated = Irreps( - [ - (mul, ir) - for mul, ir in self.irreps_node_hidden - if ir.l > 0 and ir in tmp_irreps - ] - ) - - if nonlin_type == "gate": - ir = "0e" if Irreps("0e") in tmp_irreps else "0o" - irreps_gates = Irreps([(mul, ir) - for mul, _ in irreps_gated]).simplify() - - nonlinear = Gate( - irreps_scalars, - [acts[nonlin_scalars[ir.p]] for _, ir in irreps_scalars], - irreps_gates, - [acts[nonlin_gates[ir.p]] for _, ir in irreps_gates], - irreps_gated, - dtype=dtype, - ncon_dtype=ncon_dtype - ) - - conv_irreps_out = nonlinear.irreps_in - else: - conv_irreps_out = (irreps_scalars + irreps_gated).simplify() - - nonlinear = NormActivation( - irreps_in=conv_irreps_out, - act=acts[nonlin_scalars[1]], - normalize=True, - epsilon=1e-8, - bias=False, - dtype=dtype, - ncon_dtype=ncon_dtype - ) - - conv = Convolution( - irreps_node_input=irreps_node, - irreps_node_attr=self.irreps_node_attr, - irreps_node_output=conv_irreps_out, - irreps_edge_attr=self.irreps_edge_attr, - irreps_edge_scalars=self.irreps_edge_scalars, - **convolution_kwargs, - dtype=dtype, - ncon_dtype=ncon_dtype - ) - irreps_node = nonlinear.irreps_out - - self.layers.append(Compose(conv, nonlinear)) - - if irreps_prev == irreps_node and resnet: - self.resnets.append(True) - else: - self.resnets.append(False) - irreps_prev = irreps_node - - def construct(self, node_input, node_attr, edge_src, edge_dst, edge_attr, edge_scalars): - """construct""" - layer_in = node_input - for i in enumerate(self.layers): - layer_out = self.layers[i]( - layer_in, node_attr, 
edge_src, edge_dst, edge_attr, edge_scalars) - - if self.resnets[i]: - layer_in = layer_out + layer_in - else: - layer_in = layer_out - return layer_in diff --git a/MindChem/applications/deephe3nn/mindchemistry/cell/message_passing.py b/MindChem/applications/deephe3nn/mindchemistry/cell/message_passing.py deleted file mode 100644 index 901cec030..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/cell/message_passing.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""MessagePassing""" -from mindspore import nn, ops, float32 - -from mindscience.e3nn.o3 import Irreps -from mindscience.e3nn.nn import Gate, NormActivation -from .convolution import Convolution, shift_softplus - -acts = { - "abs": ops.abs, - "tanh": ops.tanh, - "ssp": shift_softplus, - "silu": ops.silu, -} - - -class Compose(nn.Cell): - def __init__(self, first, second): - super().__init__() - self.first = first - self.second = second - - def construct(self, *inputs): - x = self.first(*inputs) - x = self.second(x) - return x - - -class MessagePassing(nn.Cell): - """MessagePassing""" - # pylint: disable=W0102 - def __init__( - self, - irreps_node_input, - irreps_node_attr, - irreps_node_hidden, - irreps_node_output, - irreps_edge_attr, - irreps_edge_scalars, - convolution_kwargs={}, - num_layers=3, - resnet=False, - nonlin_type="gate", - nonlin_scalars={"e": "ssp", "o": "tanh"}, - nonlin_gates={"e": "ssp", "o": "abs"}, - dtype=float32, - ncon_dtype=float32 - ): - super().__init__() - if nonlin_type not in ('gate', 'norm'): - raise ValueError(f"Unexpected nonlin_type {nonlin_type}.") - - nonlin_scalars = { - 1: nonlin_scalars["e"], - -1: nonlin_scalars["o"], - } - nonlin_gates = { - 1: nonlin_gates["e"], - -1: nonlin_gates["o"], - } - - self.irreps_node_input = Irreps(irreps_node_input) - self.irreps_node_hidden = Irreps(irreps_node_hidden) - self.irreps_node_output = Irreps(irreps_node_output) - self.irreps_node_attr = Irreps(irreps_node_attr) - self.irreps_edge_attr = Irreps(irreps_edge_attr) - self.irreps_edge_scalars = Irreps(irreps_edge_scalars) - - irreps_node = self.irreps_node_input - irreps_prev = irreps_node - self.layers = nn.CellList() - self.resnets = [] - - for _ in range(num_layers): - tmp_irreps = irreps_node * self.irreps_edge_attr - - irreps_scalars = Irreps( - [ - (mul, ir) - for mul, ir in self.irreps_node_hidden - if ir.l == 0 and ir in tmp_irreps - ] - ).simplify() - irreps_gated = Irreps( - [ - (mul, ir) - for mul, ir in self.irreps_node_hidden - if ir.l > 0 and ir in tmp_irreps - ] - ) - - if nonlin_type == "gate": - ir = "0e" if Irreps("0e") in tmp_irreps else "0o" - irreps_gates = Irreps([(mul, ir) - for mul, _ in irreps_gated]).simplify() - - nonlinear = Gate( - irreps_scalars, - [acts[nonlin_scalars[ir.p]] for _, ir in irreps_scalars], - irreps_gates, - [acts[nonlin_gates[ir.p]] for _, ir in irreps_gates], - irreps_gated, - dtype=dtype, 
- ncon_dtype=ncon_dtype - ) - - conv_irreps_out = nonlinear.irreps_in - else: - conv_irreps_out = (irreps_scalars + irreps_gated).simplify() - - nonlinear = NormActivation( - irreps_in=conv_irreps_out, - act=acts[nonlin_scalars[1]], - normalize=True, - epsilon=1e-8, - bias=False, - dtype=dtype, - ncon_dtype=ncon_dtype - ) - - conv = Convolution( - irreps_node_input=irreps_node, - irreps_node_attr=self.irreps_node_attr, - irreps_node_output=conv_irreps_out, - irreps_edge_attr=self.irreps_edge_attr, - irreps_edge_scalars=self.irreps_edge_scalars, - **convolution_kwargs, - dtype=dtype, - ncon_dtype=ncon_dtype - ) - irreps_node = nonlinear.irreps_out - - self.layers.append(Compose(conv, nonlinear)) - - if irreps_prev == irreps_node and resnet: - self.resnets.append(True) - else: - self.resnets.append(False) - irreps_prev = irreps_node - - def construct(self, node_input, node_attr, edge_src, edge_dst, edge_attr, edge_scalars): - """construct""" - layer_in = node_input - for i in enumerate(self.layers): - layer_out = self.layers[i]( - layer_in, node_attr, edge_src, edge_dst, edge_attr, edge_scalars) - - if self.resnets[i]: - layer_in = layer_out + layer_in - else: - layer_in = layer_out - return layer_in diff --git a/MindChem/applications/deephe3nn/mindchemistry/graph/__init__.py b/MindChem/applications/deephe3nn/mindchemistry/graph/__init__.py deleted file mode 100644 index 1ae7d9a34..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/graph/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2024 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""graph""" diff --git a/MindChem/applications/deephe3nn/mindchemistry/graph/dataloader.py b/MindChem/applications/deephe3nn/mindchemistry/graph/dataloader.py deleted file mode 100644 index 6af294afb..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/graph/dataloader.py +++ /dev/null @@ -1,408 +0,0 @@ -# Copyright 2024 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ -"""dataloader -""" -import random -import numpy as np -from mindspore import Tensor -import mindspore as ms - - -class DataLoaderBase: - r""" - DataLoader that stacks a batch of graph data to fixed-size Tensors - - For specific dataset, usually the following functions should be customized to include different fields: - __init__, shuffle_action, __iter__ - - """ - - def __init__(self, - batch_size, - edge_index, - label=None, - node_attr=None, - edge_attr=None, - padding_std_ratio=3.5, - dynamic_batch_size=True, - shuffle_dataset=True, - max_node=None, - max_edge=None): - self.batch_size = batch_size - self.edge_index = edge_index - self.index = 0 - self.step = 0 - self.padding_std_ratio = padding_std_ratio - self.batch_change_num = 0 - self.batch_exceeding_num = 0 - self.dynamic_batch_size = dynamic_batch_size - self.shuffle_dataset = shuffle_dataset - - ### can be customized to specific dataset - self.label = label - self.node_attr = node_attr - self.edge_attr = edge_attr - self.sample_num = len(self.node_attr) - batch_size_div = self.batch_size - if batch_size_div != 0: - self.step_num = int(self.sample_num / batch_size_div) - else: - raise ValueError - - if dynamic_batch_size: - self.max_start_sample = self.sample_num - else: - self.max_start_sample = self.sample_num - self.batch_size + 1 - - self.set_global_max_node_edge_num(self.node_attr, self.edge_attr, max_node, max_edge, shuffle_dataset, - dynamic_batch_size) - ####### - - def __len__(self): - return self.sample_num - - ### example of generating data of each step, can be customized to specific dataset - def __iter__(self): - if self.shuffle_dataset: - self.shuffle() - else: - self.restart() - - while self.index < self.max_start_sample: - # pylint: disable=W0612 - edge_index_step, node_batch_step, node_mask, edge_mask, batch_size_mask, node_num, edge_num, batch_size \ - = self.gen_common_data(self.node_attr, self.edge_attr) - - ### can be customized to generate different attributes or labels according to specific dataset - node_attr_step = self.gen_node_attr(self.node_attr, batch_size, node_num) - edge_attr_step = self.gen_edge_attr(self.edge_attr, batch_size, edge_num) - label_step = self.gen_global_attr(self.label, batch_size) - - self.add_step_index(batch_size) - - ### make number to Tensor, if it is used as a Tensor in the network - node_num = Tensor(node_num) - batch_size = Tensor(batch_size) - - yield node_attr_step, edge_attr_step, label_step, edge_index_step, node_batch_step, \ - node_mask, edge_mask, node_num, batch_size - - @staticmethod - def pad_zero_to_end(src, axis, zeros_len): - """pad_zero_to_end""" - pad_shape = [] - for i in range(src.ndim): - if i == axis: - pad_shape.append((0, zeros_len)) - else: - pad_shape.append((0, 0)) - return np.pad(src, pad_shape) - - @staticmethod - def gen_mask(total_len, real_len): - """gen_mask""" - mask = np.concatenate((np.full((real_len,), np.float32(1)), np.full((total_len - real_len,), np.float32(0)))) - return mask - - ### example of computing global max length of node_attr and edge_attr, can be customized to specific dataset - def set_global_max_node_edge_num(self, - node_attr, - edge_attr, - max_node=None, - max_edge=None, - shuffle_dataset=True, - dynamic_batch_size=True): - """set_global_max_node_edge_num - - Args: - node_attr: node_attr - edge_attr: edge_attr - max_node: max_node. Defaults to None. - max_edge: max_edge. Defaults to None. - shuffle_dataset: shuffle_dataset. Defaults to True. 
- dynamic_batch_size: dynamic_batch_size. Defaults to True. - - Raises: - ValueError: ValueError - """ - if not shuffle_dataset: - max_node_num, max_edge_num = self.get_max_node_edge_num(node_attr, edge_attr, dynamic_batch_size) - self.max_node_num_global = max_node_num if max_node is None else max(max_node, max_node_num) - self.max_edge_num_global = max_edge_num if max_edge is None else max(max_edge, max_edge_num) - return - - sum_node = 0 - sum_edge = 0 - count = 0 - max_node_single = 0 - max_edge_single = 0 - for step in range(self.sample_num): - node_len = len(node_attr[step]) - edge_len = len(edge_attr[step]) - sum_node += node_len - sum_edge += edge_len - max_node_single = max(max_node_single, node_len) - max_edge_single = max(max_edge_single, edge_len) - count += 1 - if count != 0: - mean_node = sum_node / count - mean_edge = sum_edge / count - else: - raise ValueError - - if max_node is not None and max_edge is not None: - if max_node < max_node_single: - raise ValueError( - f"the max_node {max_node} is less than the max length of a single sample {max_node_single}") - if max_edge < max_edge_single: - raise ValueError( - f"the max_edge {max_edge} is less than the max length of a single sample {max_edge_single}") - - self.max_node_num_global = max_node - self.max_edge_num_global = max_edge - elif max_node is None and max_edge is None: - sum_node = 0 - sum_edge = 0 - for step in range(self.sample_num): - sum_node += (len(node_attr[step]) - mean_node) ** 2 - sum_edge += (len(edge_attr[step]) - mean_edge) ** 2 - - if count != 0: - std_node = np.sqrt(sum_node / count) - std_edge = np.sqrt(sum_edge / count) - else: - raise ValueError - - self.max_node_num_global = int(self.batch_size * mean_node + - self.padding_std_ratio * np.sqrt(self.batch_size) * std_node) - self.max_edge_num_global = int(self.batch_size * mean_edge + - self.padding_std_ratio * np.sqrt(self.batch_size) * std_edge) - self.max_node_num_global = max(self.max_node_num_global, max_node_single) - self.max_edge_num_global = max(self.max_edge_num_global, max_edge_single) - elif max_node is None: - if max_edge < max_edge_single: - raise ValueError( - f"the max_edge {max_edge} is less than the max length of a single sample {max_edge_single}") - - if mean_edge != 0: - self.max_node_num_global = int(max_edge * mean_node / mean_edge) - else: - raise ValueError - self.max_node_num_global = max(self.max_node_num_global, max_node_single) - self.max_edge_num_global = max_edge - else: - if max_node < max_node_single: - raise ValueError( - f"the max_node {max_node} is less than the max length of a single sample {max_node_single}") - - self.max_node_num_global = max_node - if mean_node != 0: - self.max_edge_num_global = int(max_node * mean_edge / mean_node) - else: - raise ValueError - self.max_edge_num_global = max(self.max_edge_num_global, max_edge_single) - - def get_max_node_edge_num(self, node_attr, edge_attr, remainder=True): - """get_max_node_edge_num - - Args: - node_attr: node_attr - edge_attr: edge_attr - remainder (bool, optional): remainder. Defaults to True. 
- - Returns: - max_node_num, max_edge_num - """ - max_node_num = 0 - max_edge_num = 0 - index = 0 - for _ in range(self.step_num): - node_num = 0 - edge_num = 0 - for _ in range(self.batch_size): - node_num += len(node_attr[index]) - edge_num += len(edge_attr[index]) - index += 1 - max_node_num = max(max_node_num, node_num) - max_edge_num = max(max_edge_num, edge_num) - - if remainder: - remain_num = self.sample_num - index - 1 - node_num = 0 - edge_num = 0 - for _ in range(remain_num): - node_num += len(node_attr[index]) - edge_num += len(edge_attr[index]) - index += 1 - max_node_num = max(max_node_num, node_num) - max_edge_num = max(max_edge_num, edge_num) - - return max_node_num, max_edge_num - - def shuffle_index(self): - """shuffle_index""" - indices = list(range(self.sample_num)) - random.shuffle(indices) - return indices - - ### example of shuffling the input dataset, can be customized to specific dataset - def shuffle_action(self): - """shuffle_action""" - indices = self.shuffle_index() - self.edge_index = [self.edge_index[i] for i in indices] - self.label = [self.label[i] for i in indices] - self.node_attr = [self.node_attr[i] for i in indices] - self.edge_attr = [self.edge_attr[i] for i in indices] - - ### example of generating the final shuffled dataset, can be customized to specific dataset - def shuffle(self): - """shuffle""" - self.shuffle_action() - if not self.dynamic_batch_size: - max_node_num, max_edge_num = self.get_max_node_edge_num(self.node_attr, self.edge_attr, remainder=False) - while max_node_num > self.max_node_num_global or max_edge_num > self.max_edge_num_global: - self.shuffle_action() - max_node_num, max_edge_num = self.get_max_node_edge_num(self.node_attr, self.edge_attr, remainder=False) - - self.step = 0 - self.index = 0 - - def restart(self): - """restart""" - self.step = 0 - self.index = 0 - - ### example of calculating dynamic batch size to avoid exceeding the max length of node and edge, can be customized to specific dataset - def get_batch_size(self, node_attr, edge_attr, start_batch_size): - """get_batch_size - - Args: - node_attr: node_attr - edge_attr: edge_attr - start_batch_size: start_batch_size - - Returns: - batch_size - """ - node_num = 0 - edge_num = 0 - for i in range(start_batch_size): - index = self.index + i - node_num += len(node_attr[index]) - edge_num += len(edge_attr[index]) - - exceeding = False - while node_num > self.max_node_num_global or edge_num > self.max_edge_num_global: - node_num -= len(node_attr[index]) - edge_num -= len(edge_attr[index]) - index -= 1 - exceeding = True - self.batch_exceeding_num += 1 - if exceeding: - self.batch_change_num += 1 - - return index - self.index + 1 - - def gen_common_data(self, node_attr, edge_attr): - """gen_common_data - - Args: - node_attr: node_attr - edge_attr: edge_attr - - Returns: - common_data - """ - if self.dynamic_batch_size: - if self.step >= self.step_num: - batch_size = self.get_batch_size(node_attr, edge_attr, - min((self.sample_num - self.index), self.batch_size)) - else: - batch_size = self.get_batch_size(node_attr, edge_attr, self.batch_size) - else: - batch_size = self.batch_size - - ######################## node_batch - node_batch_step = [] - sample_num = 0 - for i in range(self.index, self.index + batch_size): - node_batch_step.extend([sample_num] * node_attr[i].shape[0]) - sample_num += 1 - node_batch_step = np.array(node_batch_step) - node_num = node_batch_step.shape[0] - - ######################## edge_index - edge_index_step = np.array([[], []], dtype=np.int64) - 
max_edge_index = 0 - for i in range(self.index, self.index + batch_size): - edge_index_step = np.concatenate((edge_index_step, self.edge_index[i] + max_edge_index), 1) - max_edge_index = np.max(edge_index_step) + 1 - edge_num = edge_index_step.shape[1] - - ######################### padding - edge_index_step = self.pad_zero_to_end(edge_index_step, 1, self.max_edge_num_global - edge_num) - node_batch_step = self.pad_zero_to_end(node_batch_step, 0, self.max_node_num_global - node_num) - - ######################### mask - node_mask = self.gen_mask(self.max_node_num_global, node_num) - edge_mask = self.gen_mask(self.max_edge_num_global, edge_num) - batch_size_mask = self.gen_mask(self.batch_size, batch_size) - - ######################### make Tensor - edge_index_step = Tensor(edge_index_step, ms.int32) - node_batch_step = Tensor(node_batch_step, ms.int32) - node_mask = Tensor(node_mask) - edge_mask = Tensor(edge_mask) - batch_size_mask = Tensor(batch_size_mask) - - return CommonData(edge_index_step, node_batch_step, node_mask, edge_mask, batch_size_mask, node_num, edge_num, - batch_size).get_tuple_data() - - def gen_node_attr(self, node_attr, batch_size, node_num): - """gen_node_attr""" - node_attr_step = np.concatenate(node_attr[self.index:self.index + batch_size], 0) - node_attr_step = self.pad_zero_to_end(node_attr_step, 0, self.max_node_num_global - node_num) - node_attr_step = Tensor(node_attr_step) - return node_attr_step - - def gen_edge_attr(self, edge_attr, batch_size, edge_num): - """gen_edge_attr""" - edge_attr_step = np.concatenate(edge_attr[self.index:self.index + batch_size], 0) - edge_attr_step = self.pad_zero_to_end(edge_attr_step, 0, self.max_edge_num_global - edge_num) - edge_attr_step = Tensor(edge_attr_step) - return edge_attr_step - - def gen_global_attr(self, global_attr, batch_size): - """gen_global_attr""" - global_attr_step = np.stack(global_attr[self.index:self.index + batch_size], 0) - global_attr_step = self.pad_zero_to_end(global_attr_step, 0, self.batch_size - batch_size) - global_attr_step = Tensor(global_attr_step) - return global_attr_step - - def add_step_index(self, batch_size): - """add_step_index""" - self.index = self.index + batch_size - self.step += 1 - -class CommonData: - """CommonData""" - def __init__(self, edge_index_step, node_batch_step, node_mask, edge_mask, batch_size_mask, node_num, edge_num, - batch_size): - self.tuple_data = (edge_index_step, node_batch_step, node_mask, edge_mask, batch_size_mask, node_num, edge_num, - batch_size) - - def get_tuple_data(self): - """get_tuple_data""" - return self.tuple_data diff --git a/MindChem/applications/deephe3nn/mindchemistry/graph/normlization.py b/MindChem/applications/deephe3nn/mindchemistry/graph/normlization.py deleted file mode 100644 index caccfdb42..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/graph/normlization.py +++ /dev/null @@ -1,278 +0,0 @@ -# Copyright 2024 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ -"""norm""" -import mindspore as ms -from mindspore import ops, Parameter, nn -from .graph import AggregateNodeToGlobal, LiftGlobalToNode - - -class BatchNormMask(nn.Cell): - """BatchNormMask""" - - def __init__(self, num_features, eps=1e-5, momentum=0.9, affine=True): - super().__init__() - self.num_features = num_features - self.eps = eps - self.momentum = momentum - self.affine = affine - self.moving_mean = Parameter(ops.zeros((num_features,), ms.float32), name="moving_mean", requires_grad=False) - self.moving_variance = Parameter(ops.ones((num_features,), ms.float32), - name="moving_variance", - requires_grad=False) - if affine: - self.gamma = Parameter(ops.ones((num_features,), ms.float32), name="gamma", requires_grad=True) - self.beta = Parameter(ops.zeros((num_features,), ms.float32), name="beta", requires_grad=True) - - def construct(self, x, mask, num): - """construct""" - if x.shape[1] != self.num_features: - raise ValueError(f"x.shape[1] {x.shape[1]} is not equal to num_features {self.num_features}") - if x.shape[0] != mask.shape[0]: - raise ValueError(f"x.shape[0] {x.shape[0]} is not equal to mask.shape[0] {mask.shape[0]}") - - if x.ndim != mask.ndim: - if mask.size != mask.shape[0]: - raise ValueError("mask.ndim dose not match src.ndim, and cannot be broadcasted to the same") - shape = [1] * x.ndim - shape[0] = -1 - mask = ops.reshape(mask, shape).astype(x.dtype) - x = ops.mul(x, mask) - - # pylint: disable=R1705 - if x.ndim > 2: - norm_axis = [] - shape = [-1] - for i in range(2, x.ndim): - norm_axis.append(i) - shape.append(1) - - if self.training: - mean = ops.div(ops.sum(x, 0), num) - mean = ops.mean(mean, norm_axis) - self.moving_mean = self.momentum * self.moving_mean + (1 - self.momentum) * mean - mean = ops.reshape(mean, shape) - mean = ops.mul(mean, mask) - x = x - mean - - var = ops.div(ops.sum(ops.pow(x, 2), 0), num) - var = ops.mean(var, norm_axis) - self.moving_variance = self.momentum * self.moving_variance + (1 - self.momentum) * var - std = ops.sqrt(ops.add(var, self.eps)) - std = ops.reshape(std, shape) - y = ops.true_divide(x, std) - else: - mean = ops.reshape(self.moving_mean.astype(x.dtype), shape) - mean = ops.mul(mean, mask) - std = ops.sqrt(ops.add(self.moving_variance.astype(x.dtype), self.eps)) - std = ops.reshape(std, shape) - y = ops.true_divide(ops.sub(x, mean), std) - - if self.affine: - gamma = ops.reshape(self.gamma.astype(x.dtype), shape) - beta = ops.reshape(self.beta.astype(x.dtype), shape) * mask - y = y * gamma + beta - - return y - else: - if self.training: - mean = ops.div(ops.sum(x, 0), num) - self.moving_mean = self.momentum * self.moving_mean + (1 - self.momentum) * mean - mean = ops.mul(mean, mask) - x = x - mean - - var = ops.div(ops.sum(ops.pow(x, 2), 0), num) - self.moving_variance = self.momentum * self.moving_variance + (1 - self.momentum) * var - std = ops.sqrt(ops.add(var, self.eps)) - y = ops.true_divide(x, std) - else: - mean = ops.mul(self.moving_mean.astype(x.dtype), mask) - std = ops.sqrt(ops.add(self.moving_variance.astype(x.dtype), self.eps)) - y = ops.true_divide(ops.sub(x, mean), std) - - if self.affine: - beta = self.beta.astype(x.dtype) * mask - y = y * self.gamma.astype(x.dtype) + beta - - return y - - -class GraphLayerNormMask(nn.Cell): - """GraphLayerNormMask""" - - def __init__(self, - normalized_shape, - begin_norm_axis=-1, - eps=1e-5, - sub_mean=True, - divide_std=True, - affine_weight=True, - affine_bias=True, - aggr_mode="mean"): - 
super().__init__() - self.normalized_shape = normalized_shape - self.begin_norm_axis = begin_norm_axis - self.eps = eps - self.sub_mean = sub_mean - self.divide_std = divide_std - self.affine_weight = affine_weight - self.affine_bias = affine_bias - self.mean = ops.ReduceMean(keep_dims=True) - self.aggregate = AggregateNodeToGlobal(mode=aggr_mode) - self.lift = LiftGlobalToNode(mode="multi_graph") - - if affine_weight: - self.gamma = Parameter(ops.ones(normalized_shape, ms.float32), name="gamma", requires_grad=True) - if affine_bias: - self.beta = Parameter(ops.zeros(normalized_shape, ms.float32), name="beta", requires_grad=True) - - def construct(self, x, batch, mask, dim_size, scale=None): - """construct""" - begin_norm_axis = self.begin_norm_axis if self.begin_norm_axis >= 0 else self.begin_norm_axis + x.ndim - if begin_norm_axis not in range(1, x.ndim): - raise ValueError(f"begin_norm_axis {begin_norm_axis} is not in range 1 to {x.ndim}") - - norm_axis = [] - for i in range(begin_norm_axis, x.ndim): - norm_axis.append(i) - if self.normalized_shape[i - begin_norm_axis] != x.shape[i]: - raise ValueError(f"x.shape[{i}] {x.shape[i]} is not equal to normalized_shape[{i - begin_norm_axis}] " - f"{self.normalized_shape[i - begin_norm_axis]}") - - if x.shape[0] != mask.shape[0]: - raise ValueError(f"x.shape[0] {x.shape[0]} is not equal to mask.shape[0] {mask.shape[0]}") - if x.shape[0] != batch.shape[0]: - raise ValueError(f"x.shape[0] {x.shape[0]} is not equal to batch.shape[0] {batch.shape[0]}") - - if x.ndim != mask.ndim: - if mask.size != mask.shape[0]: - raise ValueError("mask.ndim dose not match src.ndim, and cannot be broadcasted to the same") - shape = [1] * x.ndim - shape[0] = -1 - mask = ops.reshape(mask, shape).astype(x.dtype) - x = ops.mul(x, mask) - - if self.sub_mean: - mean = self.aggregate(x, batch, dim_size=dim_size, mask=mask) - mean = self.mean(mean, norm_axis) - mean = self.lift(mean, batch) - mean = ops.mul(mean, mask) - x = x - mean - - if self.divide_std: - var = self.aggregate(ops.square(x), batch, dim_size=dim_size, mask=mask) - var = self.mean(var, norm_axis) - if scale is not None: - var = var * scale - std = ops.sqrt(var + self.eps) - std = self.lift(std, batch) - x = ops.true_divide(x, std) - - if self.affine_weight: - x = x * self.gamma.astype(x.dtype) - - if self.affine_bias: - beta = ops.mul(self.beta.astype(x.dtype), mask) - x = x + beta - - return x - - -class GraphInstanceNormMask(nn.Cell): - """GraphInstanceNormMask""" - - def __init__(self, - num_features, - eps=1e-5, - sub_mean=True, - divide_std=True, - affine_weight=True, - affine_bias=True, - aggr_mode="mean"): - super().__init__() - self.num_features = num_features - self.eps = eps - self.sub_mean = sub_mean - self.divide_std = divide_std - self.affine_weight = affine_weight - self.affine_bias = affine_bias - self.mean = ops.ReduceMean(keep_dims=True) - self.aggregate = AggregateNodeToGlobal(mode=aggr_mode) - self.lift = LiftGlobalToNode(mode="multi_graph") - - if affine_weight: - self.gamma = Parameter(ops.ones((self.num_features,), ms.float32), name="gamma", requires_grad=True) - if affine_bias: - self.beta = Parameter(ops.zeros((self.num_features,), ms.float32), name="beta", requires_grad=True) - - def construct(self, x, batch, mask, dim_size, scale=None): - """construct""" - if x.shape[1] != self.num_features: - raise ValueError(f"x.shape[1] {x.shape[1]} is not equal to num_features {self.num_features}") - if x.shape[0] != mask.shape[0]: - raise ValueError(f"x.shape[0] {x.shape[0]} is not equal to 
mask.shape[0] {mask.shape[0]}") - if x.shape[0] != batch.shape[0]: - raise ValueError(f"x.shape[0] {x.shape[0]} is not equal to batch.shape[0] {batch.shape[0]}") - - if x.ndim != mask.ndim: - if mask.size != mask.shape[0]: - raise ValueError("mask.ndim dose not match src.ndim, and cannot be broadcasted to the same") - shape = [1] * x.ndim - shape[0] = -1 - mask = ops.reshape(mask, shape).astype(x.dtype) - x = ops.mul(x, mask) - gamma = None # 后来添加,防止未定义报错 - if x.ndim > 2: - norm_axis = [] - shape = [-1] - for i in range(2, x.ndim): - norm_axis.append(i) - shape.append(1) - - if self.affine_weight: - gamma = ops.reshape(self.gamma.astype(x.dtype), shape) - if self.affine_bias: - beta = ops.reshape(self.beta.astype(x.dtype), shape) - else: - if self.affine_weight: - gamma = self.gamma.astype(x.dtype) - if self.affine_bias: - beta = self.beta.astype(x.dtype) - - if self.sub_mean: - mean = self.aggregate(x, batch, dim_size=dim_size, mask=mask) - if x.ndim > 2: - mean = self.mean(mean, norm_axis) - mean = self.lift(mean, batch) - mean = ops.mul(mean, mask) - x = x - mean - - if self.divide_std: - var = self.aggregate(ops.square(x), batch, dim_size=dim_size, mask=mask) - if x.ndim > 2: - var = self.mean(var, norm_axis) - if scale is not None: - var = var * scale - std = ops.sqrt(var + self.eps) - std = self.lift(std, batch) - x = ops.true_divide(x, std) - - if self.affine_weight: - x = x * gamma - - if self.affine_bias: - beta = ops.mul(beta, mask) - x = x + beta - - return x diff --git a/MindChem/applications/deephe3nn/mindchemistry/utils/__init__.py b/MindChem/applications/deephe3nn/mindchemistry/utils/__init__.py deleted file mode 100644 index 7e2dda763..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/utils/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this filepio[] except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""init""" -from .load_config import load_yaml_config - -__all__ = ["load_yaml_config"] diff --git a/MindChem/applications/deephe3nn/mindchemistry/utils/check_func.py b/MindChem/applications/deephe3nn/mindchemistry/utils/check_func.py deleted file mode 100644 index 711a441fe..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/utils/check_func.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""functions""" -from __future__ import absolute_import - -from mindspore import context - -_SPACE = " " - - -def _convert_to_tuple(params): - if params is None: - return params - if not isinstance(params, (list, tuple)): - params = (params,) - if isinstance(params, list): - params_out = tuple(params) - else: - params_out = params # ✅ 防止未定义 - return params_out - - -def check_param_type(param, param_name, data_type=None, exclude_type=None): - """Check parameter's data type""" - data_type = _convert_to_tuple(data_type) - exclude_type = _convert_to_tuple(exclude_type) - - if data_type and not isinstance(param, data_type): - raise TypeError( - f"The type of {param_name} should be instance of {data_type}, but got {param} with type {type(param)}" - ) - if exclude_type and type(param) in exclude_type: - raise TypeError( - f"The type of {param_name} should not be instance of {exclude_type},but got {param} with type {type(param)}" - ) - - -def check_param_value(param, param_name, valid_value): - """check parameter's value""" - valid_value = _convert_to_tuple(valid_value) - if param not in valid_value: - raise ValueError(f"The value of {param_name} should be in {valid_value}, but got {param}") - - -def check_param_type_value(param, param_name, valid_value, data_type=None, exclude_type=None): - """check both data type and value""" - check_param_type(param, param_name, data_type=data_type, exclude_type=exclude_type) - check_param_value(param, param_name, valid_value) - - -def check_dict_type(param_dict, param_name, key_type=None, value_type=None): - """check data type for key and value of the specified dict""" - check_param_type(param_dict, param_name, data_type=dict) - - for key in param_dict.keys(): - if key_type: - check_param_type(key, _SPACE.join(("key of", param_name)), data_type=key_type) - if value_type: - values = _convert_to_tuple(param_dict[key]) - for value in values: - check_param_type(value, _SPACE.join(("value of", param_name)), data_type=value_type) - - -def check_dict_value(param_dict, param_name, key_value=None, value_value=None): - """check values for key and value of specified dict""" - check_param_type(param_dict, param_name, data_type=dict) - - for key in param_dict.keys(): - if key_value: - check_param_value(key, _SPACE.join(("key of", param_name)), key_value) - if value_value: - values = _convert_to_tuple(param_dict[key]) - for value in values: - check_param_value(value, _SPACE.join(("value of", param_name)), value_value) - - -def check_dict_type_value(param_dict, param_name, key_type=None, value_type=None, key_value=None, value_value=None): - """check values for key and value of specified dict""" - check_dict_type(param_dict, param_name, key_type=key_type, value_type=value_type) - check_dict_value(param_dict, param_name, key_value=key_value, value_value=value_value) - - -def check_mode(api_name): - """check running mode""" - if context.get_context("mode") == context.PYNATIVE_MODE: - raise RuntimeError(f"{api_name} is only supported GRAPH_MODE now but got PYNATIVE_MODE") - - -def check_param_no_greater(param, param_name, compared_value): - """ Check whether the param less than the given compared_value""" - if param > compared_value: - raise ValueError(f"The value of {param_name} should be no greater than {compared_value}, but got {param}") - - -def check_param_odd(param, param_name): - """ Check whether the param is an odd number""" - if param % 2 == 0: - raise ValueError(f"The value of 
{param_name} should be an odd number, but got {param}") - - -def check_param_even(param, param_name): - """ Check whether the param is an even number""" - for value in param: - if value % 2 != 0: - raise ValueError(f"The value of {param_name} should be an even number, but got {param}") - - -def check_lr_param_type_value(param, param_name, param_type, thresh_hold=0, restrict=False, exclude=None): - if (exclude and isinstance(param, exclude)) or not isinstance(param, param_type): - raise TypeError(f"the type of {param_name} should be {param_type}, but got {type(param)}") - if restrict: - if param <= thresh_hold: - raise ValueError(f"the value of {param_name} should be > {thresh_hold}, but got: {param}") - else: - if param < thresh_hold: - raise ValueError(f"the value of {param_name} should be >= {thresh_hold}, but got: {param}") diff --git a/MindChem/applications/deephe3nn/mindchemistry/utils/load_config.py b/MindChem/applications/deephe3nn/mindchemistry/utils/load_config.py deleted file mode 100644 index 3ddc76e42..000000000 --- a/MindChem/applications/deephe3nn/mindchemistry/utils/load_config.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright 2022 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -""" -utility functions -""" -import os -import yaml - - -def _make_paths_absolute(dir_, config): - """ - Make all values for keys ending with `_path` absolute to dir_. - - Args: - dir_ (str): The path of yaml configuration file. - config (dict): The yaml for configuration file. - - Returns: - Dict. The configuration information in dict format. - """ - for key in config.keys(): - if key.endswith("_path"): - config[key] = os.path.join(dir_, config[key]) - config[key] = os.path.abspath(config[key]) - if isinstance(config[key], dict): - config[key] = _make_paths_absolute(dir_, config[key]) - return config - - -def load_yaml_config(file_path): - """ - Load a YAML configuration file. - - Args: - file_path (str): The path of yaml configuration file. - - Returns: - Dict. The configuration information in dict format. - - Supported Platforms: - ``Ascend`` ``CPU`` ``GPU`` - - Examples: - >>> from mindchemistry.utils import load_yaml_config - >>> config_file_path = 'xxx' # 'xxx' is the file_path - >>> configs = load_yaml_config(config_file_path) - """ - # Read YAML experiment definition file - with open(file_path, 'r', encoding='utf-8') as stream: - config = yaml.safe_load(stream) - config = _make_paths_absolute(os.path.join( - os.path.dirname(file_path), ".."), config) - return config - - -def load_yaml_config_from_path(file_path): - """ - Load a YAML configuration file. - - Args: - file_path (str): The path of yaml configuration file. - - Returns: - Dict. The configuration information in dict format. 
- - Supported Platforms: - ``Ascend`` ``CPU`` ``GPU`` - """ - # Read YAML experiment definition file - with open(file_path, 'r', encoding='utf-8') as stream: - config = yaml.safe_load(stream) - - return config diff --git a/MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/e3modules.py b/MindChem/applications/deephe3nn/models/e3modules.py similarity index 99% rename from MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/e3modules.py rename to MindChem/applications/deephe3nn/models/e3modules.py index 371cbce3f..1af1313bf 100644 --- a/MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/e3modules.py +++ b/MindChem/applications/deephe3nn/models/e3modules.py @@ -15,17 +15,20 @@ """ e3modules """ -import mindspore as ms import numpy as np +import mindspore as ms from mindspore import Parameter, ParameterTuple, Tensor, jit_class, nn, ops -from mindscience.e3nn.deephe3nn.utils import irreps_from_l1l2 -from mindscience.e3nn.graph import AggregateNodeToGlobal, LiftGlobalToNode + +from graph.graph import AggregateNodeToGlobal, LiftGlobalToNode + from mindscience.e3nn.nn.gate import _Extract from mindscience.e3nn.o3.irreps import Irreps from mindscience.e3nn.o3.sub import Linear from mindscience.e3nn.o3.tensor_product import TensorProduct from mindscience.e3nn.o3.wigner import wigner_3j +from .utils import irreps_from_l1l2 + class SkipConnection(nn.Cell): """ diff --git a/MindChem/applications/deephe3nn/models/kernel.py b/MindChem/applications/deephe3nn/models/kernel.py index dcd882608..73ccdcc07 100644 --- a/MindChem/applications/deephe3nn/models/kernel.py +++ b/MindChem/applications/deephe3nn/models/kernel.py @@ -21,19 +21,18 @@ import time import mindspore as ms import mindspore.dataset as ds +import numpy as np from mindspore import nn, ops from mindspore.amp import DynamicLossScaler -import numpy as np from data.data import AijData -from mindchemistry.cell.deephe3nn.e3modules import E3TensorDecompNet -from mindchemistry.cell.deephe3nn.model import Net -from mindchemistry.cell.deephe3nn.utils import (LossRecord, RevertDecayLR, - process_targets, - set_random_seed) -from mindchemistry.graph.loss import L2LossMask +from graph.loss import L2LossMask from models.parse_configs import BaseConfig, EvalConfig, TrainConfig +from .e3modules import E3TensorDecompNet +from .model import Net +from .utils import LossRecord, RevertDecayLR, process_targets, set_random_seed + class DatasetInfo: """ diff --git a/MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/model.py b/MindChem/applications/deephe3nn/models/model.py similarity index 98% rename from MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/model.py rename to MindChem/applications/deephe3nn/models/model.py index 29089007d..f267c556e 100644 --- a/MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/model.py +++ b/MindChem/applications/deephe3nn/models/model.py @@ -25,19 +25,20 @@ import mindspore as ms from mindspore import nn, ops from mindspore.common.initializer import Uniform -from mindchemistry.graph.graph import Aggregate, LiftNodeToEdge -from mindchemistry.so2_conv import SO2Convolution, SO3Rotation -from mindchemistry.so2_conv.init_edge_rot_mat import init_edge_rot_mat -from mindchemistry.cell.deephe3nn.e3modules import ( - E3ElementWise, E3LayerNorm, SelfTp, SeparateWeightTensorProduct, - SkipConnection, SortIrreps) -from mindchemistry.cell.deephe3nn.utils import GaussianBasis, tp_path_exists +from graph.graph import Aggregate, LiftNodeToEdge -from mindscience.e3nn.o3.sub import 
(FullyConnectedTensorProduct, Linear, - LinearBias) from mindscience.e3nn.nn.gate import Gate from mindscience.e3nn.o3.irreps import Irreps from mindscience.e3nn.o3.spherical_harmonics import SphericalHarmonics +from mindscience.e3nn.o3.sub import (FullyConnectedTensorProduct, Linear, + LinearBias) +from mindscience.e3nn.so2_conv import SO2Convolution, SO3Rotation +from mindscience.e3nn.so2_conv.init_edge_rot_mat import init_edge_rot_mat + +from .e3modules import (E3ElementWise, E3LayerNorm, SelfTp, + SeparateWeightTensorProduct, SkipConnection, + SortIrreps) +from .utils import GaussianBasis, tp_path_exists epsilon = 1e-8 diff --git a/MindChem/applications/deephe3nn/models/parse_configs.py b/MindChem/applications/deephe3nn/models/parse_configs.py index ea48f1952..c34dcb7ee 100644 --- a/MindChem/applications/deephe3nn/models/parse_configs.py +++ b/MindChem/applications/deephe3nn/models/parse_configs.py @@ -24,9 +24,10 @@ from configparser import ConfigParser import mindspore as ms import numpy as np -from mindchemistry.cell.deephe3nn.utils import orbital_analysis from mindscience.e3nn.o3.irreps import Irreps +from .utils import orbital_analysis + class BaseConfig: """ diff --git a/MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/utils.py b/MindChem/applications/deephe3nn/models/utils.py similarity index 88% rename from MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/utils.py rename to MindChem/applications/deephe3nn/models/utils.py index 326a9c18a..01900a4eb 100644 --- a/MindChem/applications/deephe3nn/mindchemistry/cell/deephe3nn/utils.py +++ b/MindChem/applications/deephe3nn/models/utils.py @@ -20,12 +20,11 @@ import os import random import shutil +import numpy as np import mindspore as ms -from mindspore import nn, ops import mindspore.numpy as msnp +from mindspore import nn, ops from mindspore.train import ReduceLROnPlateau -import numpy as np -import sympy as sym from scipy import special as sp from scipy.optimize import brentq from mindscience.e3nn.o3.irreps import Irrep, Irreps @@ -84,55 +83,36 @@ def jn_zeros(n, k): return zerosj -def spherical_bessel_formulas(n): +class LossRecord: """ - Computes the sympy formulas for the spherical bessel functions up to order n (excluded) + LossRecord class """ - x = sym.symbols("x") - if x != 0: - f = [sym.sin(x) / x] - a = sym.sin(x) / x - else: - raise ValueError - for i in range(1, n): - if x != 0: - b = sym.diff(a, x) / x - else: - raise ValueError - f += [sym.simplify(b * (-x) ** i)] - a = sym.simplify(b) - return f + def __init__(self): + self.reset() + def reset(self): + """ + LossRecord class reset all the parameter + """ + self.last_val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 -def bessel_basis(n, k): - """ - Compute the sympy formulas for the normalized and rescaled spherical bessel functions up to - order n (excluded) and maximum frequency k (excluded). 
- """ + def update(self, val, num=1): + """ + LossRecord class update all the parameter + """ + self.last_val = val + self.sum += val * num + self.count += num + count_div = self.count + if count_div != 0: + self.avg = self.sum / count_div + else: + raise ValueError - zeros = jn_zeros(n, k) - normalizer = [] - for order in range(n): - normalizer_tmp = [] - for i in range(k): - normalizer_tmp += [0.5 * jn(zeros[order, i], order + 1) ** 2] - normalizer_tmp = np.divide(1, np.array(normalizer_tmp) ** 0.5) - normalizer += [normalizer_tmp] - - f = spherical_bessel_formulas(n) - x = sym.symbols("x") - bess_basis = [] - for order in range(n): - bess_basis_tmp = [] - for i in range(k): - bess_basis_tmp += [ - sym.simplify( - normalizer[order][i] * f[order].subs(x, zeros[order, i] * x) - ) - ] - bess_basis += [bess_basis_tmp] - return bess_basis class GaussianBasis(nn.Cell): @@ -187,77 +167,6 @@ class GaussianBasis(nn.Cell): return gauss -class MaskMSELoss(nn.Cell): - """ - Masked MSELoss class - """ - - def __init__(self) -> None: - pass - - def construct( - self, inputs: ms.Tensor, target: ms.Tensor, mask: ms.Tensor - ) -> ms.Tensor: - """ - MaskMSELoss class construct process - """ - mse = ops.pow(ops.abs(inputs - target), 2) - mse = ops.masked_select(mse, mask).mean() - - return mse - - -class MaskMAELoss(nn.Cell): - """ - Masked MSELoss class - """ - - def __init__(self) -> None: - pass - - def construct( - self, inputs: ms.Tensor, target: ms.Tensor, mask: ms.Tensor - ) -> ms.Tensor: - """ - Masked MSELoss class construct process - """ - mae = ops.abs(inputs - target) - mae = ops.masked_select(mae, mask).mean() - - return mae - - -class LossRecord: - """ - LossRecord class - """ - - def __init__(self): - self.reset() - - def reset(self): - """ - LossRecord class reset all the parameter - """ - self.last_val = 0 - self.avg = 0 - self.sum = 0 - self.count = 0 - - def update(self, val, num=1): - """ - LossRecord class update all the parameter - """ - self.last_val = val - self.sum += val * num - self.count += num - count_div = self.count - if count_div != 0: - self.avg = self.sum / count_div - else: - raise ValueError - - def convert2numpyt(original_dtype): """ convert the dtype to numpy dtype diff --git a/MindChem/applications/matformer/mindchemistry/so2_conv/so2.py b/MindChem/applications/matformer/mindchemistry/so2_conv/so2.py index 0c23b319e..f2a28c48b 100644 --- a/MindChem/applications/matformer/mindchemistry/so2_conv/so2.py +++ b/MindChem/applications/matformer/mindchemistry/so2_conv/so2.py @@ -20,22 +20,6 @@ from mindspore import ops, nn from mindscience.e3nn.o3 import Irreps -class Silu(nn.Cell): - """ - silu activation class - """ - - def __init__(self): - super().__init__() - self.sigmoid = nn.Sigmoid() - - def construct(self, x): - """ - silu activation class construct process - """ - return ops.mul(x, self.sigmoid(x)) - - class SO2MConvolution(nn.Cell): """ SO2 Convolution subnetwork diff --git a/mindscience/e3nn/README.md b/mindscience/e3nn/README.md new file mode 100644 index 000000000..2acff963f --- /dev/null +++ b/mindscience/e3nn/README.md @@ -0,0 +1,361 @@ +# E3NN:欧几里得神经网络 + +## 什么是 E3NN + +E3NN 是一个基于 MindSpore 框架的等变神经网络库,专注于处理三维空间数据,并在旋转变换下保持网络的一致性。 + +**核心优势**: + +- 旋转不变:分子旋转后预测结果保持不变 +- 数据效率高:减少对大量数据增强的依赖 +- 物理意义明确:遵循物理定律的对称性 + +## 基本概念 + +### 数据表示 + +E3NN 使用不可约表示(Irreps, Irreducible Representations)来描述不同类型的数据: + +- `0e`:标量(如温度、能量) +- `1o`:向量(如位置、速度) +- `2e`:张量(如应力) + +## 主要特性 + +### 1. 
数据表示与操作 + +```python +from mindscience.e3nn import o3 + +# 创建不可约表示 +irreps = o3.Irreps("2x0e + 3x1o") # 2 个标量 + 3 个向量 +print(irreps.dim) # 总维度:2 + 9 = 11 + +# 生成随机数据 +x = irreps.randn(-1) +``` + +### 2. 张量积运算 + +```python +from mindscience.e3nn import o3 + +# 组合不同类型特征 +tp = o3.TensorProduct( + irreps_in1="2x1o", # 输入1:2 个向量 + irreps_in2="1x0e", # 输入2:1 个标量 + irreps_out="2x1o" # 输出:2 个向量 +) +``` + +### 3. 等变神经网络层 + +```python +from mindscience.e3nn import nn +import mindspore.ops as ops + +# 激活函数(仅作用于标量部分) +act = nn.Activation("3x0e + 2x1o", acts=[ops.tanh, None]) + +# 门控机制 +gate = nn.Gate( + irreps_scalars="8x0e", # 标量通道 + acts=[ops.tanh], # 标量激活函数 + irreps_gates="8x0e", # 门控标量 + act_gates=[ops.sigmoid], # 门控激活函数 + irreps_gated="8x1o" # 被门控的向量通道 +) +``` + +## 库结构 + +```text +mindscience.e3nn/ +├── o3/ # 基础数学与表示模块 +│ ├── irreps.py # 不可约表示(Irreps) +│ ├── tensor_product.py # 张量积运算 +│ ├── spherical_harmonics.py # 球谐函数 +│ ├── rotation.py # 旋转矩阵与角度运算 +│ ├── wigner.py # Wigner D 矩阵 +│ ├── norm.py # 等变范数计算 +│ └── sub.py # 子表示操作 +├── nn/ # 神经网络层模块 +│ ├── activation.py # 等变激活函数 +│ ├── gate.py # 门控机制 +│ ├── batchnorm.py # 等变批归一化 +│ ├── fc.py # 等变线性层(全连接) +│ ├── normact.py # 归一化-激活组合 +│ ├── one_hot.py # One-hot 编码 +│ └── scatter.py # 图聚合(scatter)操作 +├── so2_conv/ # SO(2) 卷积与边框旋转 +│ ├── __init__.py # 公共 API 导出 +│ ├── so2.py # SO2Convolution 及子模块 +│ ├── so3.py # SO3Rotation,嵌入旋转/逆旋转 +│ ├── wigner.py # Wigner D 分块构造 +│ └── init_edge_rot_mat.py# 由边向量构造旋转矩阵 +└── utils/ # 工具函数模块 + ├── batch_dot.py # 批量点积运算 + ├── func.py # 通用工具函数 + ├── initializer.py # 参数初始化器 + ├── linalg.py # 线性代数工具 + ├── ncon.py # 张量网络收缩 + ├── perm.py # 置换操作 + └── radius.py # 半径图构造工具 +``` + +### 模块详解 + +#### o3 模块(基础数学与表示) + +- `irreps.py`:不可约表示的数据类型与维度定义 +- `tensor_product.py`:等变张量积运算实现 +- `spherical_harmonics.py`:球谐函数计算 +- `rotation.py`:旋转矩阵生成、角度转换等操作 +- `wigner.py`:用于旋转变换的 Wigner D 矩阵计算 +- `norm.py`:等变范数计算 +- `sub.py`:子表示提取与操作 + +#### nn 模块(神经网络层) + +- `activation.py`:等变激活函数,仅作用于标量部分 +- `gate.py`:控制向量特征激活的门控机制 +- `batchnorm.py`:等变批归一化层 +- `fc.py`:等变线性层(全连接) +- `normact.py`:归一化与激活的组合层 +- `one_hot.py`:One-hot 编码工具 +- `scatter.py`:图神经网络中的聚合操作 + +#### utils 模块(工具函数) + +- `batch_dot.py`:高效批量点积操作 +- `func.py`:常用工具函数集合 +- `initializer.py`:网络参数初始化器 +- `linalg.py`:线性代数相关工具 +- `ncon.py`:张量网络收缩操作 +- `perm.py`:置换与转置操作 +- `radius.py`:构造半径图的辅助函数 + +#### so2_conv 模块(SO(2) 卷积) + +- 用途:在与边对齐的局部坐标系下,对磁量子数 `m` 通道进行等变卷积,保持沿边轴的 SO(2) 对称性。 + +**关键组件** + +- `SO2Convolution`:面向 `Irreps` 输入/输出的按 `m` 通道混合网络;区分 `m=0`(实值)与 `m>0`(成对通道),并按 `l` 汇组输出。 +- `SO3Rotation`:将边旋转矩阵转换为 Wigner D 分块,用于嵌入在局部坐标系与全局坐标系之间的旋转/逆旋转。 +- `init_edge_rot_mat`:从边向量稳健构造 3×3 旋转矩阵。 + +```python +from mindscience.e3nn.so2_conv import SO2Convolution, SO3Rotation +from mindscience.e3nn.so2_conv.init_edge_rot_mat import init_edge_rot_mat + +irreps_in = "2x0e + 1x1o" +irreps_out = "1x0e + 1x1o" +so2 = SO2Convolution(irreps_in, irreps_out) +so3 = SO3Rotation(lmax=1, irreps_in=irreps_in, irreps_out=irreps_out) + +# edge_vecs: [num_edges, 3] +edge_rot = init_edge_rot_mat(edge_vecs) # [num_edges, 3, 3] +wigner, wigner_inv = so3.set_wigner(edge_rot) + +embedding_local = so3.rotate(embedding_global, wigner) +out_tuple = so2(embedding_local, edge_attrs) # 使用边数(num_edges) +out_global = so3.rotate_inv(out_tuple, wigner_inv) +``` + +**说明** + +- `SO2Convolution` 接收已旋转到局部边框的嵌入(按输入 irreps 的元组),并从 `x_edge` 获取 `num_edges`。 +- `SO3Rotation.set_wigner` 基于边旋转生成 `l ∈ [0, lmax]` 的 Wigner D 分块。 +- 典型流程:构建边旋转 → Wigner D → 局部旋转 → SO(2) 卷积 → 逆旋转。 + +## 核心组件示例 + +### 1. 
不可约表示(Irreps) + +```python +from mindscience.e3nn import o3 + +# 创建不可约表示 +irreps = o3.Irreps("2x0e + 3x1o + 1x2e") +print(irreps.dim) # 总维度:2 + 9 + 5 = 16 +print(irreps.ls) # 角动量量子数:[0, 1, 2] + +# 生成数据 +x = irreps.randn(-1) +``` + +### 2. 张量积运算 + +```python +from mindscience.e3nn import o3 + +# 全连接张量积 +tp = o3.TensorProduct( + irreps_in1="2x1o", # 输入1:向量 + irreps_in2="1x0e", # 输入2:标量 + irreps_out="2x1o" # 输出:向量 +) + +result = tp(x1, x2) # 默认 weight_mode="inner",无需手动提供权重 +``` + +### 3. 等变网络层 + +```python +from mindscience.e3nn import nn +import mindspore.ops as ops + +# 激活函数(仅作用于标量) +act = nn.Activation("3x0e", acts=[ops.tanh]) + +# 门控机制 +gate = nn.Gate( + irreps_scalars="2x0e", # 标量通道 + acts=[ops.tanh], # 标量激活函数 + irreps_gates="2x0e", # 门控标量 + act_gates=[ops.sigmoid], # 门控激活函数 + irreps_gated="2x1o" # 被门控的向量通道 +) + +# 批归一化 +bn = nn.BatchNorm("2x0e + 3x1o") +``` + +### 4. 球谐函数 + +```python +from mindscience.e3nn import o3 +import mindspore as ms + +# 计算球谐函数 +pos = ms.Tensor([[1.0, 0.0, 0.0]]) # 位置向量 +sh = o3.spherical_harmonics(l=2, x=pos, normalize=True) +``` + +### 5. 旋转与 Wigner D 矩阵 + +```python +from mindscience.e3nn import o3 +import mindspore as ms + +# 构造旋转矩阵 +alpha, beta, gamma = 0.1, 0.2, 0.3 # 欧拉角 +R = o3.angles_to_matrix(alpha, beta, gamma) + +# 对不可约表示施加旋转 +irreps = o3.Irreps("1x1o") # 一个向量 +x = irreps.randn(-1) +D = irreps.wigD_from_matrix(R) # Wigner D 矩阵 +x_rotated = D @ x # 旋转后的向量 +``` + +### 6. 等变线性层 + +```python +from mindscience.e3nn import o3 + +# 创建等变线性层 +linear = o3.Linear( + irreps_in="2x0e + 1x1o", # 输入:2 个标量 + 1 个向量 + irreps_out="1x0e + 2x1o" # 输出:1 个标量 + 2 个向量 +) + +# 前向计算 +x = o3.Irreps("2x0e + 1x1o").randn(-1) +y = linear(x) +``` + +### 7. 批归一化 + +```python +from mindscience.e3nn import nn + +# 等变批归一化 +bn = nn.BatchNorm("4x0e + 2x1o") + +# 应用归一化 +x = o3.Irreps("4x0e + 2x1o").randn(32, -1) # batch=32 +x_normalized = bn(x) +``` + +### 8. 范数计算 + +```python +from mindscience.e3nn import o3 + +# 针对不同不可约表示计算范数 +irreps = o3.Irreps("2x0e + 3x1o") +x = irreps.randn(-1) + +# 使用 Norm 计算范数 +norm_layer = o3.Norm(irreps) +norm_result = norm_layer(x) +``` + +### 9. Scatter 聚合 + +```python +from mindscience.e3nn import nn +import mindspore as ms + +# 图神经网络中的 scatter 聚合 +scatter = nn.Scatter(mode="add") # 支持:'add', 'sum', 'div', 'max', 'min', 'mul' + +# 示例 +src = ms.Tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]) # 源特征 +index = ms.Tensor([0, 0, 1], dtype=ms.int32) # 目标索引 +result = scatter(src, index, dim_size=2) +``` + +## 快速开始 + +### 基本使用流程 + +```python +from mindscience.e3nn import o3 +import mindspore as ms + +# 1. 定义数据类型 +irreps_in = o3.Irreps("3x0e + 2x1o") # 3 个标量 + 2 个向量 +irreps_out = o3.Irreps("1x0e") # 1 个标量输出 + +# 2. 创建等变层 +layer = o3.Linear(irreps_in, irreps_out) + +# 3. 
前向传播 +x = irreps_in.randn(-1) # 生成输入数据 +y = layer(x) # 等变变换 + +print(f"Input dimension: {x.shape}") +print(f"Output dimension: {y.shape}") +``` + +### 构建简单网络 + +```python +from mindscience.e3nn import o3, nn +import mindspore as ms +import mindspore.nn as ms_nn +import mindspore.ops as ops + +class SimpleE3NN(ms_nn.Cell): + def __init__(self): + super().__init__() + # 特征抽取 + self.linear1 = o3.Linear("3x0e + 1x1o", "8x0e + 4x1o") + self.act = nn.Activation("8x0e + 4x1o", acts=[ops.tanh, None]) + + # 输出层 + self.linear2 = o3.Linear("8x0e + 4x1o", "1x0e") + + def construct(self, x): + x = self.linear1(x) + x = self.act(x) + return self.linear2(x) + +# 使用模型 +model = SimpleE3NN() +input_data = o3.Irreps("3x0e + 1x1o").randn(-1) +output = model(input_data) +``` \ No newline at end of file diff --git a/docs/architecture/mindscience/e3nn.md b/mindscience/e3nn/README_en.md similarity index 81% rename from docs/architecture/mindscience/e3nn.md rename to mindscience/e3nn/README_en.md index 00fbd4420..334b33ab3 100644 --- a/docs/architecture/mindscience/e3nn.md +++ b/mindscience/e3nn/README_en.md @@ -87,6 +87,12 @@ mindscience.e3nn/ │ ├── normact.py # Normalization-activation combinations │ ├── one_hot.py # One-hot encoding │ └── scatter.py # Scatter aggregation operations +├── so2_conv/ # SO(2) convolution and edge-frame rotation +│ ├── __init__.py # Public API exports +│ ├── so2.py # SO2Convolution and submodules +│ ├── so3.py # SO3Rotation and embedding rotate/invert +│ ├── wigner.py # Wigner D block construction +│ └── init_edge_rot_mat.py# Build edge rotation matrices from vectors └── utils/ # Utility functions module ├── batch_dot.py # Batch dot product operations ├── func.py # General utility functions @@ -129,6 +135,42 @@ mindscience.e3nn/ - **perm.py**: Permutation and transposition operations - **radius.py**: Utility functions for constructing radius graphs +- **radius.py**: Utility functions for constructing radius graphs + +#### so2_conv Module - SO(2) Convolution + +- **Purpose**: Perform equivariant convolution over magnetic quantum number `m` channels in an edge-aligned local frame, preserving SO(2) symmetry around the edge axis. + +**Key Components** + +- `SO2Convolution`: m-wise mixing network for inputs/outputs defined by `Irreps`. Splits `m=0` (real) and `m>0` (pair channels) and assembles per-`l` outputs. +- `SO3Rotation`: converts edge rotation matrices to Wigner D blocks and rotates embeddings to/from the local frame. +- `init_edge_rot_mat`: robust construction of 3×3 edge rotation matrices from edge vectors. + +```python +from mindscience.e3nn.so2_conv import SO2Convolution, SO3Rotation +from mindscience.e3nn.so2_conv.init_edge_rot_mat import init_edge_rot_mat + +irreps_in = "2x0e + 1x1o" +irreps_out = "1x0e + 1x1o" +so2 = SO2Convolution(irreps_in, irreps_out) +so3 = SO3Rotation(lmax=1, irreps_in=irreps_in, irreps_out=irreps_out) + +# edge_vecs: [num_edges, 3] +edge_rot = init_edge_rot_mat(edge_vecs) # [num_edges, 3, 3] +wigner, wigner_inv = so3.set_wigner(edge_rot) + +embedding_local = so3.rotate(embedding_global, wigner) +out_tuple = so2(embedding_local, edge_attrs) # edge_attrs gives num_edges +out_global = so3.rotate_inv(out_tuple, wigner_inv) +``` + +**Notes** + +- `SO2Convolution` expects rotated embedding as a tuple per input irreps and uses edge count from `x_edge` (`num_edges`). +- `SO3Rotation.set_wigner` produces Wigner D blocks for `l ∈ [0, lmax]` based on edge rotations. 
+- The pipeline is: build edge rotations → Wigner D → rotate to local frame → SO(2) convolution → rotate back. + ## Core Components ### 1. Irreducible Representations (Irreps) diff --git a/MindChem/applications/deephe3nn/mindchemistry/so2_conv/__init__.py b/mindscience/e3nn/so2_conv/__init__.py similarity index 94% rename from MindChem/applications/deephe3nn/mindchemistry/so2_conv/__init__.py rename to mindscience/e3nn/so2_conv/__init__.py index 915e5da57..1ea4a3bf5 100644 --- a/MindChem/applications/deephe3nn/mindchemistry/so2_conv/__init__.py +++ b/mindscience/e3nn/so2_conv/__init__.py @@ -15,5 +15,7 @@ """ init file """ -from .so2 import SO2Convolution from .so3 import SO3Rotation +from .so2 import SO2Convolution + +__all__ = ["SO3Rotation", "SO2Convolution"] diff --git a/MindChem/applications/deephe3nn/mindchemistry/so2_conv/init_edge_rot_mat.py b/mindscience/e3nn/so2_conv/init_edge_rot_mat.py similarity index 74% rename from MindChem/applications/deephe3nn/mindchemistry/so2_conv/init_edge_rot_mat.py rename to mindscience/e3nn/so2_conv/init_edge_rot_mat.py index b47b4a243..a05a2264b 100644 --- a/MindChem/applications/deephe3nn/mindchemistry/so2_conv/init_edge_rot_mat.py +++ b/mindscience/e3nn/so2_conv/init_edge_rot_mat.py @@ -15,8 +15,8 @@ """ file to get rotating matrix from edge distance vector """ -import mindspore.numpy as ms_np from mindspore import ops +import mindspore.numpy as ms_np def init_edge_rot_mat(edge_distance_vec): @@ -25,15 +25,12 @@ def init_edge_rot_mat(edge_distance_vec): """ epsilon = 0.00000001 edge_vec_0 = edge_distance_vec - edge_vec_0_distance = ops.sqrt(ops.maximum(ops.sum(edge_vec_0**2, dim=1), epsilon)) + edge_vec_0_distance = ops.sqrt(ops.maximum(ops.sum(edge_vec_0 ** 2, dim=1), epsilon)) # Make sure the atoms are far enough apart norm_x = ops.div(edge_vec_0, edge_vec_0_distance.view(-1, 1)) edge_vec_2 = ops.rand_like(edge_vec_0) - 0.5 - edge_vec_2 = ops.div( - edge_vec_2, - ops.sqrt(ops.maximum(ops.sum(edge_vec_2**2, dim=1), epsilon)).view(-1, 1), - ) + edge_vec_2 = ops.div(edge_vec_2, ops.sqrt(ops.maximum(ops.sum(edge_vec_2 ** 2, dim=1), epsilon)).view(-1, 1)) # Create two rotated copies of the random vectors in case the random vector is aligned with norm_x # With two 90 degree rotated vectors, at least one should not be aligned with norm_x edge_vec_2b = edge_vec_2.copy() @@ -45,32 +42,18 @@ def init_edge_rot_mat(edge_distance_vec): vec_dot_b = ops.abs(ops.sum(edge_vec_2b * norm_x, dim=1)).view(-1, 1) vec_dot_c = ops.abs(ops.sum(edge_vec_2c * norm_x, dim=1)).view(-1, 1) vec_dot = ops.abs(ops.sum(edge_vec_2 * norm_x, dim=1)).view(-1, 1) - edge_vec_2 = ops.where( - ops.broadcast_to(ops.gt(vec_dot, vec_dot_b), edge_vec_2b.shape), - edge_vec_2b, - edge_vec_2, - ) + edge_vec_2 = ops.where(ops.broadcast_to(ops.gt(vec_dot, vec_dot_b), edge_vec_2b.shape), edge_vec_2b, edge_vec_2) vec_dot = ops.abs(ops.sum(edge_vec_2 * norm_x, dim=1)).view(-1, 1) - edge_vec_2 = ops.where( - ops.broadcast_to(ops.gt(vec_dot, vec_dot_c), edge_vec_2c.shape), - edge_vec_2c, - edge_vec_2, - ) + edge_vec_2 = ops.where(ops.broadcast_to(ops.gt(vec_dot, vec_dot_c), edge_vec_2c.shape), edge_vec_2c, edge_vec_2) vec_dot = ops.abs(ops.sum(edge_vec_2 * norm_x, dim=1)) # Check the vectors aren't aligned norm_z = ms_np.cross(norm_x, edge_vec_2, axis=1) - norm_z = ops.div( - norm_z, ops.sqrt(ops.maximum(ops.sum(norm_z**2, dim=1, keepdim=True), epsilon)) - ) - norm_z = ops.div( - norm_z, ops.sqrt(ops.maximum(ops.sum(norm_z**2, dim=1), epsilon)).view(-1, 1) - ) + norm_z = ops.div(norm_z, 
ops.sqrt(ops.maximum(ops.sum(norm_z ** 2, dim=1, keepdim=True), epsilon))) + norm_z = ops.div(norm_z, ops.sqrt(ops.maximum(ops.sum(norm_z ** 2, dim=1), epsilon)).view(-1, 1)) norm_y = ms_np.cross(norm_x, norm_z, axis=1) - norm_y = ops.div( - norm_y, ops.sqrt(ops.maximum(ops.sum(norm_y**2, dim=1, keepdim=True), epsilon)) - ) + norm_y = ops.div(norm_y, ops.sqrt(ops.maximum(ops.sum(norm_y ** 2, dim=1, keepdim=True), epsilon))) # Construct the 3D rotation matrix norm_x = norm_x.view(-1, 3, 1) norm_y = -norm_y.view(-1, 3, 1) diff --git a/MindChem/applications/deephe3nn/mindchemistry/so2_conv/jd.pkl b/mindscience/e3nn/so2_conv/jd.pkl similarity index 100% rename from MindChem/applications/deephe3nn/mindchemistry/so2_conv/jd.pkl rename to mindscience/e3nn/so2_conv/jd.pkl diff --git a/MindChem/applications/deephe3nn/mindchemistry/so2_conv/so2.py b/mindscience/e3nn/so2_conv/so2.py similarity index 100% rename from MindChem/applications/deephe3nn/mindchemistry/so2_conv/so2.py rename to mindscience/e3nn/so2_conv/so2.py diff --git a/MindChem/applications/deephe3nn/mindchemistry/so2_conv/so3.py b/mindscience/e3nn/so2_conv/so3.py similarity index 100% rename from MindChem/applications/deephe3nn/mindchemistry/so2_conv/so3.py rename to mindscience/e3nn/so2_conv/so3.py diff --git a/MindChem/applications/deephe3nn/mindchemistry/so2_conv/wigner.py b/mindscience/e3nn/so2_conv/wigner.py similarity index 77% rename from MindChem/applications/deephe3nn/mindchemistry/so2_conv/wigner.py rename to mindscience/e3nn/so2_conv/wigner.py index c3e08615c..85d5153c7 100644 --- a/MindChem/applications/deephe3nn/mindchemistry/so2_conv/wigner.py +++ b/mindscience/e3nn/so2_conv/wigner.py @@ -17,27 +17,23 @@ wigner file """ # pylint: disable=C0103 +import os import pickle from mindspore import ops import mindspore as ms from mindscience.e3nn.utils.func import broadcast_args +jd = None +file_dir = os.path.dirname(os.path.abspath(__file__)) +pkl_path = os.path.join(file_dir, 'jd.pkl') + +with open(pkl_path, 'rb') as f: + jd = pickle.load(f) def wigner_D(lv, alpha, beta, gamma): """ - # Borrowed from e3nn @ 0.4.0: - # https://github.com/e3nn/e3nn/blob/0.4.0/e3nn/o3/_wigner.py#L10 - # jd is a list of tensors of shape (2l+1, 2l+1) - - # Borrowed from e3nn @ 0.4.0: - # https://github.com/e3nn/e3nn/blob/0.4.0/e3nn/o3/_wigner.py#L37 - # - # In 0.5.0, e3nn shifted to torch.matrix_exp which is significantly slower: - # https://github.com/e3nn/e3nn/blob/0.5.0/e3nn/o3/_wigner.py#L92 + wigner_D function that complies with mindspore.jit compilation """ - jd = None - with open("jd.pkl", "rb") as f: - jd = pickle.load(f) if not lv < len(jd): raise NotImplementedError( f"wigner D maximum l implemented is {len(jd) - 1}, send us an email to ask for more" -- Gitee