diff --git a/.jenkins/check/config/filter_linklint.txt b/.jenkins/check/config/filter_linklint.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2728de7d754319f9de271f3bbace132c7eb0ec39
--- /dev/null
+++ b/.jenkins/check/config/filter_linklint.txt
@@ -0,0 +1,5 @@
+# MindSPONGE
+# file directory: MindSPONGE/
+
+https://api.colabfold.com
+https://a3m.mmseqs.com
\ No newline at end of file
diff --git a/.jenkins/check/config/filter_pylint.txt b/.jenkins/check/config/filter_pylint.txt
index d71fee9a5063c6caa4c13fd3997afaa834ed09c3..d48f33eb81f01726d85149344bc28b9d97c3b8fa 100644
--- a/.jenkins/check/config/filter_pylint.txt
+++ b/.jenkins/check/config/filter_pylint.txt
@@ -3,6 +3,25 @@
"mindscience/MindElec/mindelec/common/lr_scheduler.py" "missing-docstring"
"mindscience/MindElec/mindelec/operators/derivatives.py" "missing-docstring"
#MindSPONGE
+"mindscience/MindSPONGE/applications/research/FAAST/extract_restraints.py" "broad-except"
+"mindscience/MindSPONGE/applications/research/FAAST/search.py" "expression-not-assigned"
+"mindscience/MindSPONGE/applications/research/FAAST/search.py" "logging-fstring-interpolation"
+"mindscience/MindSPONGE/applications/research/FAAST/search.py" "broad-except"
+"mindscience/MindSPONGE/applications/research/FAAST/assign/assign.py" "expression-not-assigned"
+"mindscience/MindSPONGE/applications/research/FAAST/assign/assign.py" "raising-bad-type"
+"mindscience/MindSPONGE/applications/research/FAAST/assign/assign.py" "broad-except"
+"mindscience/MindSPONGE/applications/research/FAAST/assign/assign.py" "no-else-return"
+"mindscience/MindSPONGE/applications/research/FAAST/commons/analysis.py" "no-else-raise"
+"mindscience/MindSPONGE/applications/research/FAAST/commons/analysis.py" "broad-except"
+"mindscience/MindSPONGE/applications/research/FAAST/commons/analysis.py" "consider-using-set-comprehension"
+"mindscience/MindSPONGE/applications/research/FAAST/data/dataset.py" "bad-staticmethod-argument"
+"mindscience/MindSPONGE/applications/research/FAAST/data/dataset.py" "unused-argument"
+"mindscience/MindSPONGE/applications/research/FAAST/data/dataset.py" "no-value-for-parameter"
+"mindscience/MindSPONGE/applications/research/FAAST/data/preprocess.py" "bad-continuation"
+"mindscience/MindSPONGE/applications/research/FAAST/nmr_relax/relax/amber_minimize.py" "no-else-return"
+"mindscience/MindSPONGE/applications/research/FAAST/nmr_relax/relax/relax.py" "useless-object-inheritance"
+"mindscience/MindSPONGE/applications/research/FAAST/run_rasp.py" "bad-whitespace"
+"mindscience/MindSPONGE/applications/research/FAAST/run_rasp.py" "broad-except"
"mindscience/MindSPONGE/mindsponge/python/__init__.py" "wrong-import-position"
"mindscience/MindSPONGE/mindsponge/python/common/units.py" "protected-access"
"mindscience/MindSPONGE/mindsponge/python/common/checkpoint.py" "protected-access"
@@ -113,6 +132,8 @@
"mindscience/MindFlow/mindflow/cfd/boundary_conditions/periodic.py" "useless-super-delegation"
"mindscience/MindFlow/mindflow/cfd/boundary_conditions/symmetry.py" "useless-super-delegation"
"mindscience/MindFlow/mindflow/cfd/space_solver/riemann_computer/rusanov.py" "unused-argument"
+"mindscience/MindFlow/mindflow/cfd/space_solver/riemann_computer/hllc.py" "unused-argument"
+"mindscience/MindFlow/mindflow/cfd/space_solver/riemann_computer/roe.py" "unused-argument"
"mindscience/MindSPONGE/applications/research/grover/src/model/layers.py" "invalid-name"
"mindscience/MindSPONGE/applications/research/UFold/train.py" "invalid-name"
"mindscience/MindSPONGE/applications/research/UFold/ascend_postprocess.py" "invalid-name"
diff --git a/.jenkins/check/config/whitelizard.txt b/.jenkins/check/config/whitelizard.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e68275370ee093499a925077bb78ddc2b3697179
--- /dev/null
+++ b/.jenkins/check/config/whitelizard.txt
@@ -0,0 +1,9 @@
+# Scene1:
+# function_name1, function_name2
+# Scene2:
+# file_path:function_name1, function_name2
+#
+
+## MindFlow
+mindscience/MindFlow/mindflow/cfd/space_solver/riemann_computer/roe.py:eigen_composition
+
diff --git a/.jenkins/test/config/elec_config/dependent_packages.yaml b/.jenkins/test/config/elec_config/dependent_packages.yaml
index 0a93d4c9262eb296a25b44eea64c04a9a16b6504..515e6fa7681f1c2dc46c5c36205e063a8a3cc3be 100644
--- a/.jenkins/test/config/elec_config/dependent_packages.yaml
+++ b/.jenkins/test/config/elec_config/dependent_packages.yaml
@@ -1,2 +1,2 @@
mindspore:
- '/mindspore/mindspore/daily/202303/20230308/master_20230308160032_45068d436d1462f4780945a29fe0e062d98f2bbb_newest/'
+ '/mindspore/mindspore/version/202302/20230228/master_20230228222023_4327f95b9195b322d7f43b367ab0a49c3003f0d6/'
diff --git a/.jenkins/test/config/sponge_config/dependent_packages.yaml b/.jenkins/test/config/sponge_config/dependent_packages.yaml
index bb7aadcb03c15bc7148490798e01e20c66b27484..98eabd50e242d2ba58f3fc83adfd8129f796a47d 100644
--- a/.jenkins/test/config/sponge_config/dependent_packages.yaml
+++ b/.jenkins/test/config/sponge_config/dependent_packages.yaml
@@ -1,2 +1,2 @@
mindspore:
- '/mindspore/mindspore/daily/202304/20230406/master_20230406121635_cf4b7fec8cc6ba18e5f30c9326a0e2bf2a7eb0fe_newest/'
+ '/mindspore/mindspore/version/202307/20230708/master_20230708143041_73d4f99b65f4f6986d874149d18deedab86c2de7/'
diff --git a/MindChemistry/CMakeLists.txt b/MindChemistry/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..23981ac28453d5c812ea4f3c71ef1e0fb142ebbc
--- /dev/null
+++ b/MindChemistry/CMakeLists.txt
@@ -0,0 +1,23 @@
+cmake_minimum_required(VERSION 3.14.1)
+project(MindChemistry)
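+# Typical out-of-source build (illustrative; prefer the repository's own build script if one is provided):
+#   cmake -S . -B build && cmake --build build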
+
+# set build options for project target
+include(${CMAKE_SOURCE_DIR}/cmake/options.cmake)
+
+
+find_package(Python3 COMPONENTS Interpreter Development)
+if(Python3_FOUND)
+ set(PYTHON_INCLUDE_DIRS "${Python3_INCLUDE_DIRS}")
+ set(PYTHON_LIBRARIES "${Python3_LIBRARIES}")
+else()
+ find_python_package(py_inc py_lib)
+ set(PYTHON_INCLUDE_DIRS "${py_inc}")
+ set(PYTHON_LIBRARIES "${py_lib}")
+endif()
+
+message("PYTHON_INCLUDE_DIRS = ${PYTHON_INCLUDE_DIRS}")
+message("PYTHON_LIBRARIES = ${PYTHON_LIBRARIES}")
+include_directories(${PYTHON_INCLUDE_DIRS})
+
+# packaging tool
+include(${CMAKE_SOURCE_DIR}/cmake/package.cmake)
diff --git a/MindChemistry/LICENSE b/MindChemistry/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..57bc88a15a0ee8266c259b2667e64608d3f7e292
--- /dev/null
+++ b/MindChemistry/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/MindChemistry/NOTICE b/MindChemistry/NOTICE
new file mode 100644
index 0000000000000000000000000000000000000000..99bc7743de241e23228759587cf91d1cc516fc03
--- /dev/null
+++ b/MindChemistry/NOTICE
@@ -0,0 +1,3 @@
+MindSpore MindChemistry
+Copyright 2019-2023 Huawei Technologies Co., Ltd
+
diff --git a/MindChemistry/README.md b/MindChemistry/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..71388763f297d600c435d6e3a6bc00dc4f1bf236
--- /dev/null
+++ b/MindChemistry/README.md
@@ -0,0 +1,144 @@
+ ENGLISH | [简体中文](README_CN.md)
+
+[](https://pypi.org/project/mindspore)
+[](https://badge.fury.io/py/mindspore)
+[](https://github.com/mindspore-ai/mindspore/blob/master/LICENSE)
+[](https://gitee.com/mindspore/mindscience/pulls)
+
+# **MindSpore Chemistry**
+
+- [**MindSpore Chemistry**](#mindspore-chemistry)
+ - [**Introduction**](#introduction)
+ - [**Latest News**](#latest-news)
+ - [**Features**](#features)
+ - [**Applications**](#applications)
+ - [**Modules**](#modules)
+ - [**Installation**](#installation)
+ - [**Version Dependency**](#version-dependency)
+ - [**Dependency**](#dependency)
+ - [**Hardware**](#hardware)
+ - [**source code install**](#source-code-install)
+ - [**Community**](#community)
+ - [**Core Contributor**](#core-contributor)
+ - [**Contribution Guide**](#contribution-guide)
+ - [**License**](#license)
+ - [**References**](#references)
+
+## **Introduction**
+
+Conventional chemistry studies have long been confronted with numerous challenges. The process of experimental design, synthesis, characterization, and analysis can be time-consuming, costly, and highly dependent on experts’ experiences.
+The synergy between AI and chemistry offers unprecedented opportunities to overcome the limitations of conventional approaches and unlock new frontiers in scientific discovery and innovation. AI techniques can efficiently process vast amounts of data, mine underlying patterns and generate predictive models. By leveraging AI, chemistry and materials science researchers can accelerate the design and optimization of chemical processes as well as the design and analysis of novel materials.
+
+**MindChemistry** is a toolkit built on MindSpore that endeavors to integrate AI with conventional chemistry research. It supports multi-scale tasks, including molecular generation, property prediction and synthesis optimization, across organic, inorganic and composite chemical systems. MindChemistry is dedicated to enabling efficient joint research between AI and chemistry, seeking to facilitate an innovative paradigm of such research and to provide experts with novel perspectives and efficient tools.
+
+
+
+
+
+## **Latest News**
+
+- 🔥`2023.06.16` MindChemistry 0.1.0-alpha is released.
+
+
+
+## **Features**
+
+### **Applications**
+
+- **Material Generation**
+    - **Scenario**: Inorganic chemistry
+    - **Dataset**: High-entropy alloy dataset. It contains the chemical compositions of known high-entropy alloys and their thermodynamic properties, i.e. composition information such as the metal element types and their percentages, as well as thermodynamic properties such as magnetostrictive effects and Curie temperatures.
+    - **Task**: High-entropy alloy composition design. We integrate the machine learning-enabled high-entropy alloy discovery approach [1] to design novel high-entropy alloys with low thermal expansion coefficients (TEC) in an active learning fashion. In each active learning cycle, candidate high-entropy alloys are first generated, candidates with low TEC are then identified by property prediction models, and experimental validation is finally used to determine the compositions of the novel high-entropy alloys (a schematic sketch of this loop follows after this list).
+
+
+- **Property Prediction**
+    - **Scenario**: Organic chemistry
+    - **Dataset**: Revised Molecular Dynamics 17 (rMD17). The rMD17 dataset contains molecular dynamics trajectories of several organic molecules. It provides chemical descriptive information such as atomic numbers and positions, as well as molecular property information such as energies and forces.
+    - **Task**: Molecular energy prediction. We integrate the Neural Equivariant Interatomic Potentials (NequIP) [2] algorithm, which takes atomic numbers and positions as input, builds a graph descriptor from them, and predicts the molecular energy with an equivariant graph neural network.
+
+
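+The active learning loop described above can be sketched as follows. The snippet is purely illustrative: the candidate generator, the surrogate TEC model and the experimental step are hypothetical placeholders, not MindChemistry APIs.
+
+```python
+import numpy as np
+
+rng = np.random.default_rng(0)
+N_ELEMENTS = 5
+TEC_WEIGHTS = rng.random(N_ELEMENTS)            # fake "physics" behind the stand-in surrogate
+
+def generate_candidates(n):
+    """Hypothetical generator: random element fractions that sum to one."""
+    x = rng.random((n, N_ELEMENTS))
+    return x / x.sum(axis=1, keepdims=True)
+
+def predict_tec(compositions):
+    """Stand-in property-prediction model for the thermal expansion coefficient."""
+    return compositions @ TEC_WEIGHTS
+
+def validate_experimentally(composition):
+    """Placeholder for the experimental validation step."""
+    return float(composition @ TEC_WEIGHTS)
+
+labeled = []                                    # validated (composition, TEC) pairs
+for cycle in range(3):
+    candidates = generate_candidates(1000)      # 1) generate candidate alloys
+    tec = predict_tec(candidates)               # 2) screen with the property model
+    best = candidates[np.argmin(tec)]           # 3) keep the lowest-TEC candidate
+    labeled.append((best, validate_experimentally(best)))  # 4) validate; a real loop retrains here
+    print(f"cycle {cycle}: best predicted TEC = {tec.min():.4f}")
+```
+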
+### **Modules**
+- **Equivariant Computing**
+    - **Introduction**: Symmetry is an essential property in the sciences. An equivariant neural network takes intuitive representations as input and computes equivariantly with respect to spatial rotations, translations and inversion. Adopting equivariant neural networks for modeling scientific scenarios yields more effective data representations and more efficient model training.
+    - **Functions**: The E(3) computing package integrates basic modules such as Irreps, Spherical Harmonics and Tensor Products. On top of these, equivariant neural network layers such as equivariant Activation, Linear and Convolution layers are provided for constructing user-customized equivariant neural networks (a minimal numerical illustration of the equivariance property is given below).
+
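+The core property these modules are built around is equivariance: applying a rotation to the input and then the map gives the same result as applying the map first and then rotating the output, f(Rx) = R f(x). The sketch below checks this numerically with plain NumPy for a simple rotation-equivariant map; it only illustrates the concept and does not use MindChemistry's API.
+
+```python
+import numpy as np
+
+rng = np.random.default_rng(0)
+
+def random_rotation():
+    """Random 3x3 rotation matrix (orthogonal, determinant +1) via QR decomposition."""
+    q, r = np.linalg.qr(rng.normal(size=(3, 3)))
+    q = q * np.sign(np.diag(r))     # fix the sign convention of the QR factors
+    if np.linalg.det(q) < 0:        # flip one axis if needed to get a proper rotation
+        q[:, 0] = -q[:, 0]
+    return q
+
+def equivariant_map(x):
+    """A rotation-equivariant map on 3D vectors: scale each vector by its squared norm."""
+    return x * np.sum(x**2, axis=-1, keepdims=True)
+
+x = rng.normal(size=(4, 3))         # a small batch of 3D feature vectors
+R = random_rotation()
+
+lhs = equivariant_map(x @ R.T)      # rotate the input, then apply the map
+rhs = equivariant_map(x) @ R.T      # apply the map, then rotate the output
+print("equivariant:", np.allclose(lhs, rhs))   # prints: equivariant: True
+```
+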
@@ -238,7 +258,7 @@ yufan, wangzidong, liuhongsheng, zhouhongye, zhangyi, dengzhiwen, liulei, liboka
## **贡献指南**
-- 如何贡献您的代码,请点击此处查看:[贡献指南](https://gitee.com/mindspore/mindscience/blob/master/CONTRIBUTION.md)
+- 如何贡献您的代码,请点击此处查看:[贡献指南](https://gitee.com/mindspore/mindscience/blob/master/MindFlow/CONTRIBUTION_CN.md)
## **许可证**
diff --git a/MindFlow/RELEASE.md b/MindFlow/RELEASE.md
index 3c5d759bf7ba7a8a4c1b3126f3f3b8a204094586..6fe96f34e0894abd5ed5d8ca0866a6911abc15b2 100644
--- a/MindFlow/RELEASE.md
+++ b/MindFlow/RELEASE.md
@@ -1,10 +1,10 @@
-# MindFlow Release Notes
-
-MindFlow is a flow simulation suite developed based on MindSpore. It supports AI flow simulation in industries such as aerospace, ship manufacturing, and energy and power. It aims to provide efficient and easy-to-use AI computing flow simulation software for industrial research engineers, university professors, and students.
+# MindSpore Flow Release Notes
[查看中文](./RELEASE_CN.md)
-## MindFlow 0.1.0.rc1 Release Notes
+## MindSpore Flow 0.1.0.rc1 Release Notes
+
+MindSpore Flow is a flow simulation suite developed based on MindSpore. It supports AI flow simulation in industries such as aerospace, ship manufacturing, and energy and power. It aims to provide efficient and easy-to-use AI computing flow simulation software for industrial research engineers, university professors, and students.
### Major Features and Improvements
diff --git a/MindFlow/RELEASE_CN.md b/MindFlow/RELEASE_CN.md
index 97f5ca5ba5988febf460497faf78f8307e1fa976..899cc3558d3fda3f2b0932009809d2dea77e18c7 100644
--- a/MindFlow/RELEASE_CN.md
+++ b/MindFlow/RELEASE_CN.md
@@ -1,10 +1,10 @@
-# MindFlow Release Notes
-
-MindFlow是基于昇思MindSpore开发的流体仿真领域套件,支持航空航天、船舶制造以及能源电力等行业领域的AI流场模拟,旨在于为广大的工业界科研工程人员、高校老师及学生提供高效易用的AI计算流体仿真软件。
+# MindSpore Flow Release Notes
[View English](./RELEASE.md)
-## MindFlow 0.1.0.rc1 Release Notes
+## MindSpore Flow 0.1.0.rc1 Release Notes
+
+MindSpore Flow是基于昇思MindSpore开发的流体仿真领域套件,支持航空航天、船舶制造以及能源电力等行业领域的AI流场模拟,旨在于为广大的工业界科研工程人员、高校老师及学生提供高效易用的AI计算流体仿真软件。
### 主要特性和增强
diff --git a/MindFlow/applications/cfd/lax/README.md b/MindFlow/applications/cfd/lax/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..516172bacd56b3e54c6de530df7e1a4dbba7a6f8
--- /dev/null
+++ b/MindFlow/applications/cfd/lax/README.md
@@ -0,0 +1,48 @@
+ENGLISH | [简体中文](README_CN.md)
+
+# Lax Tube Problem
+
+## Overview
+
+The Lax shock tube problem is a classic problem in fluid dynamics that involves the behavior of a shock wave in a tube filled with gas. It is used to test the accuracy of numerical methods for solving partial differential equations. In this case, the MindFlow fluid simulation suite is used to solve the Lax shock tube problem.
+
+## QuickStart
+
+### Run Option 1: Call `solve_lax.py` from command line
+
+```shell
+python solve_lax.py --mode GRAPH --save_graphs_path ./graphs --device_target GPU --device_id 0 --config_file_path ./numeric.yaml --reconstructor WENO5 --riemann_computer Roe
+```
+
+where:
+
+`--mode` is the running mode. 'GRAPH' indicates static graph mode. 'PYNATIVE' indicates dynamic graph mode. You can refer to the [MindSpore official website](https://www.mindspore.cn/docs/en/r2.0/design/dynamic_graph_and_static_graph.html) for details. Default 'GRAPH'.
+
+`--save_graphs` indicates whether to save the computational graph. Default 'False'.
+
+`--save_graphs_path` indicates the path to save the computational graph. Default './graphs'.
+
+`--device_target` indicates the computing platform. You can choose 'Ascend' or 'GPU'. Default 'Ascend'.
+
+`--device_id` indicates the index of NPU or GPU. Default 0.
+
+`--config_file_path` indicates the path of the configuration file. Default './numeric.yaml'.
+
+`--reconstructor` indicates the reconstruction scheme. You can choose 'WENO3', 'WENO5' or 'WENO7'. Default 'WENO5'.
+
+`--riemann_computer` indicates the Riemann solver. You can choose 'HLLC', 'Roe' or 'Rusanov'. Default 'Roe'.
+
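+The same numerical methods can also be selected programmatically, as is done in the accompanying Jupyter notebook: load `numeric.yaml` and override the reconstructor and Riemann computer before building the simulator. The sketch below mirrors the notebook code; the import path of `load_yaml_config` is an assumption, so check the notebook for the exact imports.
+
+```python
+from mindflow.utils import load_yaml_config   # import path assumed; see the notebook
+from mindflow.cfd.simulator import Simulator
+from mindflow.cfd.runtime import RunTime
+
+# MindSpore context setup (context.set_context(...)) is omitted here; see the notebook.
+config = load_yaml_config('numeric.yaml')
+# Optionally override the numerical methods defined in numeric.yaml.
+config["space_solver"]["convective_flux"]["reconstructor"] = "WENO5"
+config["space_solver"]["convective_flux"]["riemann_computer"] = "Roe"
+
+simulator = Simulator(config)
+runtime = RunTime(config['runtime'], simulator.mesh_info, simulator.material)
+```
+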
+### Run Option 2: Run Jupyter Notebook
+
+You can use [Chinese](./lax_tube_CN.ipynb) or [English](./lax_tube.ipynb) Jupyter Notebook to run the training and evaluation code line-by-line.
+
+## Results
+
+The following two figures show the results of the Lax shock tube problem computed with different reconstruction schemes and Riemann computers. The first figure compares the reconstruction schemes with the Roe Riemann computer fixed; the second compares the Riemann computers with the reconstruction scheme fixed to WENO5. The label "exact" denotes the exact solution, used as a reference. Both figures exhibit some oscillations in the computed results, because the reconstruction is performed on the conservative variables in physical space, and high-order reconstruction in physical space can introduce oscillations; accordingly, WENO7 shows more pronounced oscillations than WENO3.
+
+![reconstructor](images/reconstructor.png)
+
+![riemann_computer](images/riemann_computer.png)
+## Contributor
+
+huxin2023
diff --git a/MindFlow/applications/cfd/lax/README_CN.md b/MindFlow/applications/cfd/lax/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..ec9ad809b3c65bea74076c20e24816a3877195ae
--- /dev/null
+++ b/MindFlow/applications/cfd/lax/README_CN.md
@@ -0,0 +1,47 @@
+[ENGLISH](README.md) | 简体中文
+
+# Lax 激波管问题
+
+## 概述
+
+Lax 激波管问题是流体动力学中的经典问题,以 Peter D. Lax 的名字命名。该问题用于测试求解偏微分方程的数值方法的准确性。本案例采用 MindFlow 流体模拟套件用于解决 Lax 激波管问题。
+
+## 快速开始
+
+### 训练方式一:在命令行中调用`solve_lax.py`脚本
+
+```shell
+python solve_lax.py --mode GRAPH --save_graphs_path ./graphs --device_target GPU --device_id 0 --config_file_path ./numeric.yaml --reconstructor WENO5 --riemann_computer Roe
+```
+
+其中,
+`--mode`表示运行的模式,'GRAPH'表示静态图模式, 'PYNATIVE'表示动态图模式,详见[MindSpore 官网](https://www.mindspore.cn/docs/zh-CN/r2.0/design/dynamic_graph_and_static_graph.html),默认值'GRAPH';
+
+`--save_graphs`表示是否保存计算图,默认值'False';
+
+`--save_graphs_path`表示计算图保存的路径,默认值'./graphs'
+
+`--device_target`表示使用的计算平台类型,可以选择'Ascend'或'GPU',默认值'GPU';
+
+`--device_id`表示使用的计算卡编号,可按照实际情况填写,默认值 0;
+
+`--config_file_path`表示配置文件的路径,默认值'./numeric.yaml';
+
+`--reconstructor`表示使用的重构格式,可以选择'WENO3'、'WENO5'或'WENO7',默认值'WENO5';
+
+`--riemann_computer`表示使用的 Riemann 求解器,可以选择'HLLC'、'Roe'或'Rusanov',默认值'Roe';
+
+### 训练方式二:运行 Jupyter Notebook
+
+您可以使用[中文版](./lax_tube_CN.ipynb)和[英文版](./lax_tube.ipynb)Jupyter Notebook 逐行运行训练和验证代码。
+
+## 结果展示
+
+下面的两幅图展示了针对 Lax 激波管问题采用不同的重构格式和 Riemann 求解器所计算得到的结果。首先是第一幅图,展示了在使用 Roe Riemann 求解器的情况下,采用不同的重构格式得到的结果。第二幅图旨在展示在使用固定重构格式 WENO5 的情况下,不同 Riemann 求解器之间的差异。其中,"exact"表示精确解,用作参考对比。需要注意的是,两幅图中的计算结果呈现出一定的振荡行为。这是因为本程序采用了基于物理空间的守恒变量重构,而在物理空间中进行高阶重构可能会引起一些振荡。从结果中也可以观察到,WENO7 的振荡比 WENO3 更为明显。
+
+![reconstructor](images/reconstructor.png)
+
+![riemann_computer](images/riemann_computer.png)
+## Contributor
+
+huxin2023
diff --git a/MindFlow/applications/cfd/lax/images/reconstructor.png b/MindFlow/applications/cfd/lax/images/reconstructor.png
new file mode 100644
index 0000000000000000000000000000000000000000..0c2fc0860defea7cc5a40a3e4e8d8778d59b8007
Binary files /dev/null and b/MindFlow/applications/cfd/lax/images/reconstructor.png differ
diff --git a/MindFlow/applications/cfd/lax/images/riemann_computer.png b/MindFlow/applications/cfd/lax/images/riemann_computer.png
new file mode 100644
index 0000000000000000000000000000000000000000..611f97a30adb44e3a7cfe7e78a4886249907856f
Binary files /dev/null and b/MindFlow/applications/cfd/lax/images/riemann_computer.png differ
diff --git a/MindFlow/applications/cfd/lax/lax_tube.ipynb b/MindFlow/applications/cfd/lax/lax_tube.ipynb
index cb470add9a9bac500c598b509fa2105671a0d21d..39320ac6e3aa0495308341c14d2bc33205dc42ca 100644
--- a/MindFlow/applications/cfd/lax/lax_tube.ipynb
+++ b/MindFlow/applications/cfd/lax/lax_tube.ipynb
@@ -55,9 +55,7 @@
"from mindflow.cfd.runtime import RunTime\n",
"from mindflow.cfd.simulator import Simulator\n",
"\n",
- "from src.ic import lax_ic_1d\n",
- "\n",
- "context.set_context(device_target=\"GPU\", device_id=3)"
+ "from src.ic import lax_ic_1d"
]
},
{
@@ -65,9 +63,17 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Define Simulator and RunTime\n",
+ "## Setting up the MindSpore Runtime Environment\n",
+ "\n",
+ "Before running the program, the context should be configured. The commonly used parameters inside `context.set_context` are described as follows:\n",
+ "\n",
+ "`mode` represents the execution mode. 'GRAPH' indicates the static graph mode, 'PYNATIVE' indicates the dynamic graph mode. For more details, please refer to the [MindSpore official website](https://www.mindspore.cn/docs/en/r2.0/design/dynamic_graph_and_static_graph.html?highlight=pynative). The default value is 'GRAPH'.\n",
"\n",
- "The mesh, material, runtime, boundary conditions and numerical methods are defined in [numeric.yaml](https://gitee.com/mindspore/mindscience/blob/master/MindFlow/applications/cfd/lax/numeric.yaml)."
+ "`save_graphs` indicates whether to save the computation graph. The default value is 'False'.\n",
+ "\n",
+ "`device_target` represents the type of computing platform to be used, which can be either 'Ascend' or 'GPU'. The default value is 'GPU'.\n",
+ "\n",
+    "`device_id` represents the index of the computing card to be used. Set it according to your actual hardware. The default value is 0."
]
},
{
@@ -75,8 +81,47 @@
"execution_count": 2,
"metadata": {},
"outputs": [],
+ "source": [
+ "context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target=\"GPU\", device_id=0)"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Reading Configuration File\n",
+ "\n",
+    "This program provides multiple options for configuring the grid, materials, simulation time, boundary conditions, and numerical methods. These configurations can be set in the file named [numeric.yaml](./numeric.yaml). Users can choose different numerical methods according to their needs. The program supports the following numerical methods: WENO3, WENO5, and WENO7 for reconstruction, and Rusanov, HLLC, and Roe for Riemann solvers.\n",
+ "\n",
+ "In addition to directly setting the configurations in the file, you can also modify the following code to select the desired numerical methods. In the code block below, the second and third lines are where the numerical methods are set. If you prefer to specify the numerical methods directly in the configuration file, you can comment out these two lines of code."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
"source": [
"config = load_yaml_config('numeric.yaml')\n",
+ "config[\"space_solver\"][\"convective_flux\"][\"reconstructor\"] = \"WENO5\"\n",
+ "config[\"space_solver\"][\"convective_flux\"][\"riemann_computer\"] = \"Roe\""
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Define Simulator and RunTime\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
"simulator = Simulator(config)\n",
"runtime = RunTime(config['runtime'], simulator.mesh_info, simulator.material)"
]
@@ -93,7 +138,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -114,7 +159,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 6,
"metadata": {},
"outputs": [
{
@@ -122,40 +167,152 @@
"output_type": "stream",
"text": [
"current time = 0.000000, time step = 0.001117\n",
- "current time = 0.001117, time step = 0.001107\n",
- "current time = 0.002224, time step = 0.001072\n",
- "current time = 0.003296, time step = 0.001035\n",
- "current time = 0.004332, time step = 0.001016\n",
- "current time = 0.005348, time step = 0.001008\n",
- "current time = 0.006356, time step = 0.000991\n",
- "current time = 0.007347, time step = 0.000976\n",
- "current time = 0.008324, time step = 0.000966\n",
- "current time = 0.009290, time step = 0.000960\n",
- "current time = 0.010250, time step = 0.000957\n",
- "current time = 0.011207, time step = 0.000954\n",
- "current time = 0.012161, time step = 0.000953\n",
- "current time = 0.013113, time step = 0.000952\n",
- "current time = 0.014066, time step = 0.000952\n",
- "current time = 0.015017, time step = 0.000951\n",
- "current time = 0.015969, time step = 0.000951\n",
- "current time = 0.016920, time step = 0.000952\n",
- "current time = 0.017872, time step = 0.000951\n",
- "current time = 0.018823, time step = 0.000951\n",
- "current time = 0.019775, time step = 0.000952\n",
- "current time = 0.020726, time step = 0.000953\n",
- "current time = 0.021679, time step = 0.000952\n",
- "current time = 0.022631, time step = 0.000952\n",
- "current time = 0.023583, time step = 0.000952\n",
- "current time = 0.024535, time step = 0.000952\n",
- "current time = 0.025488, time step = 0.000952\n",
- "current time = 0.026440, time step = 0.000952\n",
- "current time = 0.027392, time step = 0.000953\n",
- "current time = 0.028345, time step = 0.000952\n",
- "...\n",
- "current time = 0.136983, time step = 0.000953\n",
- "current time = 0.137936, time step = 0.000953\n",
- "current time = 0.138889, time step = 0.000953\n",
- "current time = 0.139843, time step = 0.000953\n"
+ "current time = 0.001117, time step = 0.001031\n",
+ "current time = 0.002148, time step = 0.001000\n",
+ "current time = 0.003148, time step = 0.000972\n",
+ "current time = 0.004120, time step = 0.000962\n",
+ "current time = 0.005082, time step = 0.000954\n",
+ "current time = 0.006036, time step = 0.000944\n",
+ "current time = 0.006980, time step = 0.000955\n",
+ "current time = 0.007935, time step = 0.000953\n",
+ "current time = 0.008888, time step = 0.000950\n",
+ "current time = 0.009838, time step = 0.000947\n",
+ "current time = 0.010785, time step = 0.000943\n",
+ "current time = 0.011728, time step = 0.000942\n",
+ "current time = 0.012670, time step = 0.000943\n",
+ "current time = 0.013613, time step = 0.000947\n",
+ "current time = 0.014560, time step = 0.000952\n",
+ "current time = 0.015512, time step = 0.000950\n",
+ "current time = 0.016462, time step = 0.000950\n",
+ "current time = 0.017412, time step = 0.000949\n",
+ "current time = 0.018361, time step = 0.000949\n",
+ "current time = 0.019310, time step = 0.000949\n",
+ "current time = 0.020258, time step = 0.000950\n",
+ "current time = 0.021208, time step = 0.000951\n",
+ "current time = 0.022159, time step = 0.000953\n",
+ "current time = 0.023112, time step = 0.000952\n",
+ "current time = 0.024064, time step = 0.000951\n",
+ "current time = 0.025014, time step = 0.000950\n",
+ "current time = 0.025965, time step = 0.000951\n",
+ "current time = 0.026915, time step = 0.000952\n",
+ "current time = 0.027867, time step = 0.000953\n",
+ "current time = 0.028820, time step = 0.000953\n",
+ "current time = 0.029774, time step = 0.000953\n",
+ "current time = 0.030727, time step = 0.000953\n",
+ "current time = 0.031680, time step = 0.000952\n",
+ "current time = 0.032632, time step = 0.000952\n",
+ "current time = 0.033584, time step = 0.000953\n",
+ "current time = 0.034538, time step = 0.000954\n",
+ "current time = 0.035492, time step = 0.000954\n",
+ "current time = 0.036446, time step = 0.000954\n",
+ "current time = 0.037399, time step = 0.000954\n",
+ "current time = 0.038353, time step = 0.000953\n",
+ "current time = 0.039307, time step = 0.000954\n",
+ "current time = 0.040260, time step = 0.000954\n",
+ "current time = 0.041215, time step = 0.000954\n",
+ "current time = 0.042169, time step = 0.000954\n",
+ "current time = 0.043122, time step = 0.000954\n",
+ "current time = 0.044076, time step = 0.000954\n",
+ "current time = 0.045030, time step = 0.000954\n",
+ "current time = 0.045984, time step = 0.000954\n",
+ "current time = 0.046938, time step = 0.000954\n",
+ "current time = 0.047892, time step = 0.000954\n",
+ "current time = 0.048847, time step = 0.000954\n",
+ "current time = 0.049801, time step = 0.000954\n",
+ "current time = 0.050755, time step = 0.000954\n",
+ "current time = 0.051709, time step = 0.000954\n",
+ "current time = 0.052663, time step = 0.000954\n",
+ "current time = 0.053618, time step = 0.000954\n",
+ "current time = 0.054572, time step = 0.000954\n",
+ "current time = 0.055526, time step = 0.000954\n",
+ "current time = 0.056481, time step = 0.000954\n",
+ "current time = 0.057435, time step = 0.000955\n",
+ "current time = 0.058390, time step = 0.000954\n",
+ "current time = 0.059344, time step = 0.000954\n",
+ "current time = 0.060298, time step = 0.000955\n",
+ "current time = 0.061253, time step = 0.000954\n",
+ "current time = 0.062207, time step = 0.000954\n",
+ "current time = 0.063162, time step = 0.000955\n",
+ "current time = 0.064116, time step = 0.000955\n",
+ "current time = 0.065071, time step = 0.000954\n",
+ "current time = 0.066025, time step = 0.000955\n",
+ "current time = 0.066980, time step = 0.000955\n",
+ "current time = 0.067934, time step = 0.000954\n",
+ "current time = 0.068889, time step = 0.000955\n",
+ "current time = 0.069844, time step = 0.000955\n",
+ "current time = 0.070798, time step = 0.000955\n",
+ "current time = 0.071753, time step = 0.000955\n",
+ "current time = 0.072707, time step = 0.000955\n",
+ "current time = 0.073662, time step = 0.000955\n",
+ "current time = 0.074617, time step = 0.000955\n",
+ "current time = 0.075571, time step = 0.000955\n",
+ "current time = 0.076526, time step = 0.000955\n",
+ "current time = 0.077480, time step = 0.000955\n",
+ "current time = 0.078435, time step = 0.000955\n",
+ "current time = 0.079390, time step = 0.000955\n",
+ "current time = 0.080344, time step = 0.000955\n",
+ "current time = 0.081299, time step = 0.000955\n",
+ "current time = 0.082254, time step = 0.000955\n",
+ "current time = 0.083209, time step = 0.000955\n",
+ "current time = 0.084163, time step = 0.000955\n",
+ "current time = 0.085118, time step = 0.000955\n",
+ "current time = 0.086073, time step = 0.000955\n",
+ "current time = 0.087027, time step = 0.000955\n",
+ "current time = 0.087982, time step = 0.000955\n",
+ "current time = 0.088937, time step = 0.000955\n",
+ "current time = 0.089892, time step = 0.000955\n",
+ "current time = 0.090846, time step = 0.000955\n",
+ "current time = 0.091801, time step = 0.000955\n",
+ "current time = 0.092756, time step = 0.000955\n",
+ "current time = 0.093711, time step = 0.000955\n",
+ "current time = 0.094665, time step = 0.000955\n",
+ "current time = 0.095620, time step = 0.000955\n",
+ "current time = 0.096575, time step = 0.000955\n",
+ "current time = 0.097530, time step = 0.000955\n",
+ "current time = 0.098485, time step = 0.000955\n",
+ "current time = 0.099439, time step = 0.000955\n",
+ "current time = 0.100394, time step = 0.000955\n",
+ "current time = 0.101349, time step = 0.000955\n",
+ "current time = 0.102304, time step = 0.000955\n",
+ "current time = 0.103259, time step = 0.000955\n",
+ "current time = 0.104214, time step = 0.000955\n",
+ "current time = 0.105168, time step = 0.000955\n",
+ "current time = 0.106123, time step = 0.000955\n",
+ "current time = 0.107078, time step = 0.000955\n",
+ "current time = 0.108033, time step = 0.000955\n",
+ "current time = 0.108988, time step = 0.000955\n",
+ "current time = 0.109943, time step = 0.000955\n",
+ "current time = 0.110898, time step = 0.000955\n",
+ "current time = 0.111853, time step = 0.000955\n",
+ "current time = 0.112807, time step = 0.000955\n",
+ "current time = 0.113762, time step = 0.000955\n",
+ "current time = 0.114717, time step = 0.000955\n",
+ "current time = 0.115672, time step = 0.000955\n",
+ "current time = 0.116627, time step = 0.000955\n",
+ "current time = 0.117582, time step = 0.000955\n",
+ "current time = 0.118537, time step = 0.000955\n",
+ "current time = 0.119492, time step = 0.000955\n",
+ "current time = 0.120447, time step = 0.000955\n",
+ "current time = 0.121402, time step = 0.000955\n",
+ "current time = 0.122357, time step = 0.000955\n",
+ "current time = 0.123312, time step = 0.000955\n",
+ "current time = 0.124267, time step = 0.000955\n",
+ "current time = 0.125222, time step = 0.000955\n",
+ "current time = 0.126177, time step = 0.000955\n",
+ "current time = 0.127132, time step = 0.000955\n",
+ "current time = 0.128086, time step = 0.000955\n",
+ "current time = 0.129041, time step = 0.000955\n",
+ "current time = 0.129996, time step = 0.000955\n",
+ "current time = 0.130951, time step = 0.000955\n",
+ "current time = 0.131906, time step = 0.000955\n",
+ "current time = 0.132861, time step = 0.000955\n",
+ "current time = 0.133816, time step = 0.000955\n",
+ "current time = 0.134771, time step = 0.000955\n",
+ "current time = 0.135726, time step = 0.000955\n",
+ "current time = 0.136681, time step = 0.000955\n",
+ "current time = 0.137636, time step = 0.000955\n",
+ "current time = 0.138591, time step = 0.000955\n",
+ "current time = 0.139546, time step = 0.000955\n"
]
}
],
@@ -179,12 +336,12 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAABkEAAAJtCAYAAACBs9diAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAA9hAAAPYQGoP6dpAAC1uklEQVR4nOzdeXyV9Z33//d11iSQhS0LEBZFEEQWQWm0i7YoYsto5zetd22LQ6vz04H7YeWedkoXre1dmXtmtHZ+Y0sXHTp3x2oXq3Up1mKRseLCEgUVEFkSIAlhyZ6cnO33xznXlaQEJJBc13Wu6/V8PM4j5ORc5HNoPed7vp/v5/Mx0ul0WgAAAAAAAAAAAB4TcDoAAAAAAAAAAACAoUASBAAAAAAAAAAAeBJJEAAAAAAAAAAA4EkkQQAAAAAAAAAAgCeRBAEAAAAAAAAAAJ5EEgQAAAAAAAAAAHgSSRAAAAAAAAAAAOBJJEEAAAAAAAAAAIAnhZwO4EykUikdPnxYhYWFMgzD6XAAAHCFdDqt1tZWjR07VoEA5xqGEmsRAABOxlrEPqxFAAA42ZmuRXIiCXL48GFVVlY6HQYAAK5UW1ur8ePHOx2Gp7EWAQDg1FiLDD3WIgAAnNr7rUVyIglSWFgoKfNkioqKHI4GAAB3aGlpUWVlpfU+iaHDWgQAgJOxFrEPaxEAAE52pmuRnEiCmKWeRUVFvNkDAPAXaIkw9FiLAABwaqxFhh5rEQAATu391iI07QQAAAAAAAAAAJ5EEgQAAAAAAAAAAHgSSRAAAAAAAAAAAOBJJEEAAAAAAAAAAIAnkQQBAAAAAAAAAACeRBIEAAAAAAAAAAB4EkkQAACQMzZu3KglS5Zo7NixMgxDTzzxxPteE4vF9PWvf10TJ05UNBrVpEmT9PDDDw99sAAAAAAAwHEhpwMAAAA4U+3t7Zo9e7a+8IUv6K//+q/P6JpPf/rTamho0EMPPaQpU6aorq5OqVRqiCMFAAAAAABuQBIEAADkjMWLF2vx4sVn/Ph169bpxRdf1N69ezVy5EhJ0qRJk4YoOgAAAAAA4Da0wwIAAJ71u9/9TvPnz9c///M/a9y4cZo6dar+4R/+QZ2dnae9LhaLqaWlpc8NAAAAAADkHipBAACAZ+3du1cvvfSS8vLy9Nvf/lZHjx7V3//93+vYsWP6j//4j1Net3r1at1zzz02RgoAAAAAAIYClSAAAMCzUqmUDMPQf/3Xf+myyy7Tddddp/vvv18/+9nPTlsNsmrVKjU3N1u32tpaG6MGAAAAAACDhUoQAADgWRUVFRo3bpyKi4ut+6ZPn650Oq2DBw/qggsu6Pe6aDSqaDRqV5gAAAAAAGCIUAkCAAA864orrtDhw4fV1tZm3bd7924FAgGNHz/ewcgAAAAAAIAdSIIAAICc0dbWpurqalVXV0uS9u3bp+rqatXU1EjKtLFaunSp9fibbrpJo0aN0rJly/T2229r48aN+vKXv6wvfOELys/Pd+IpAAAAAAAAG5EEAQAAOWPz5s2aO3eu5s6dK0lauXKl5s6dq7vuukuSVFdXZyVEJGn48OF6/vnn1dTUpPnz5+uzn/2slixZon/7t39zJH4AAAAAAGAvZoIAAICcceWVVyqdTp/y52vXrj3pvgsvvFDPP//8EEYFAAAAAADcikoQAAAAAAAAAADgSSRBAAAAAAAAAACAJ5EEAQAAAAAAAAAAnkQSBAAAAAAAAAAAeBKD0QEAlnQ6LcMwnA4DAAAAwBBKpdJK93N/f58E+vt4wGcGAEAuIQkCAD6XSqX1ws4j+tmm/dp64IRu+8j5uv3K8xUKUiwIAOdiy4Hj+vcX9qi6tklXTBmtz1w2QZefP4qNIwCA436wYY/+9Q+7h/z39JtAOeVj+/4kFDA0LBrSiIKwzhszXNPLC3XD3HE6b8zwwQ8UAOBpJEEAwOe++eQO/derNdb39z2/Wxt2N+rBmy5ReXGeg5EBQO6699l39OONe63vn36zTk+/Waf/cWmlvvvJixUMkAgBAHhfup9yk/4qUPp7cDKVVizRrePt3XqvsV3Pv92gf3thj66YMkrfuX4myRAAwBkjCQIAPvb24RY98lomAXLrhyZrwqhh+uff79SWAyd0639u1q9uq1JeOOhwlACQW17ff9xKgHx6/nh9YtZY/eHtej3yao0efb1WHd1J3ffp2QpTcQcAOeeHP/yhfvjDH2r//v2SpIsuukh33XWXFi9e3O/j165dq2XLlvW5LxqNqqura6hDPa1bPnSePveBiX3uO9OERbqfB/b/uP7+vlOkQPq5uzuZUnssqcbWmN5rbNOLuxv1p11H9Oc9x/TXP3xZP/rcPC04b1T/fx8AAL2QBAEAH/uX53YqnZY+MatCX//4DEnSh6aM1id/8GdtP9Ssu598S//nb2Y5HCUA5I7uREpfe3y7JOnG+ZXWa+iHp45R1Xmjdcej2/S7Nw4rGDB0/6dn0xoLAHLM+PHj9U//9E+64IILlE6n9bOf/UzXX3+9tm3bposuuqjfa4qKirRr1y7reze89ueFgzlz2GlaeaE+eMFo3Xz5JNUe79CKX2zTG7VN+txDr+onS+frymmlTocIAHA5jp8BgE+9uveY/rSrUaGAof91zTTr/kmjh+n/+8wlChjSY5tr9djrNaf5WwAAvf3kv/fq3SNtGjUsolXXXdjnZx+fVaEffX6eQgFDv912SN/747sORQkAOFtLlizRddddpwsuuEBTp07Vd7/7XQ0fPlyvvPLKKa8xDEPl5eXWrayszMaIvaVyZIEevfUDuvaicsWTaf3Dr97Q0baY02EBAFyOJAgA+NT312c23268tFKTRw/r87MPXjDaSox863dva29j2xn/vc0dcf1443v69lNv66V3jyqRTA1e0ADgYl3xpH704nuSpK9/fLpKCiInPeZj08v03U/OlCT92/p3STQDQA5LJpN69NFH1d7erqqqqlM+rq2tTRMnTlRlZaWuv/56vfXWW+/7d8diMbW0tPS5ISM/EtQD/2OOppUV6mhbt1Y9vr3fFl0AAJhIggCAD9U3d2nT3mOSpNuvPL/fx9z+kfN1+fmj1BlP6s7HqhV/n2RGVzypf163U1X/tF73PrtTD/95nz730Kv60D//SfXNzvY8BgA7PLu9Ti1dCY0rydf1c8ad8nE3XjpBf5997f3q49v1q821doUIABgE27dv1/DhwxWNRnXbbbfpt7/9rWbMmNHvY6dNm6aHH35YTz75pH7+858rlUrp8ssv18GDB0/7O1avXq3i4mLrVllZORRPJWflhYP63o1zFAkG9PzbDfrN1kNOhwQAcDGSIADgQ89sr1M6Lc2fOELjRxT0+5hAwNB9n56toryQ3jjYrP/z+52n/Pt2HGrWX/37S/rBhvfU0Z3UheWF+tS88SqMhlTX3KU/7ToyVE8FAFzjF69lqjr+x6WVCgZO3+/9y4um6fMfmKh0WvrKb97UT/97r5IpTrECQC6YNm2aqqur9eqrr+r222/XzTffrLfffrvfx1ZVVWn
p0qWaM2eOPvKRj+jxxx/XmDFj9KMf/ei0v2PVqlVqbm62brW1JMz/0oyxRfrS1RdIku7/wy7FEkmHIwIAuBVJEADwoafeOCwpMxD9dCqK87X6rzNDfX/60j6tybZ5MSVTaT34pz365A/+rN0NbRo9PKI1n5un39/xIf3Lp2Zr6eUTJUnbak4MwbMAAPfYc6RVr+8/oWDA0Kfmv/9pXcMw9O3rL7ISIf/7mXd0/YMvad2OOjV3xG2IGABwtiKRiKZMmaJ58+Zp9erVmj17tr7//e+f0bXhcFhz587Vnj17Tvu4aDSqoqKiPjec7AtXTFZZUVSHm7v0y9dJFAEA+hdyOgAAgL1qj3eourZJAUO67n2SIFJmkG/tiQv1T7/fqX/6/U7VNXXq6hnl2nesXY+9XqMdhzL9iRddVKZ7P3mxRg2PWtfOqRwhSaqubRqS5wIAbvGL1zIbLx+9sFTlxXlndI2ZCJlaXqh/WbdTOw616Lafb5VhSGOL81VaFFVpYVSlhXmZr0VRVRTna9KoYRo3Iv99q028LplK63BTp/Yebdf+o+1q7owrkUxpeF5IE0cN0wWlwzV59DAZhr//nc5VOp1WfUuXKorznQ4FcK1UKqVY7MyGcyeTSW3fvl3XXXfdEEflD3nhoFZcNUXffPIt/fuf9uhT8yuVFw46HRYAwGVIggCAzzz9Zp0k6QPnjVJp4Zlt1N32kfPV1BHXmhff0882HdDPNh2wfjY8GtK3/uoi/T+XjDtpo2lOZYkk6d0jbWrpiqsoLzw4TwIAXCSRTOm32zK9yD9z2cB6thuGoc9/YKIWzyzXD/70nl7cfUTvNbbrUFOnDjV1nvK6cNBQ5cgCTRo1TJNGDVPlyHwNi4SUFwkqLxRQXjioge79R0NBjRwWVmlR3qC8XidTaQUMDSgJkUyldawtpoaWmOpbunS0LabO7qRiiZRiiaQ6u5OqOd6hvY3t2nesXd2J08+rGlEQ1twJIzRv4ghdPK5YY0vyNKIgIsMwlE6nlZaUSqeltCRDGjUsekbJpe5ESrvqW7XvWLvqmzvVFkuqIBJUcX5YlSMKNGFkgSpK8hQO2ld4n06nlUyllUhlvsaTKZ3oiOt4e0wBw1BBJKSxJXkqHMD/th3dCd3+8616cXejPnZhqb71VxepcmTfNppHWrv05LbDOnC8XUdbu5UfCaq8OE9TxgzX/EkjNGFkwfv+f+B4e7de3XtMuxpadfBEp4rzwyorimpaeZEuHleskcMi7xtrVzyp3Q2t2lnXqqbObpUURDR59DDNnziCRBgG1apVq7R48WJNmDBBra2teuSRR7RhwwY999xzkqSlS5dq3LhxWr16tSTp29/+tj7wgQ9oypQpampq0r/8y7/owIEDuuWWW5x8Gp7y6Usr9cMN7+lwc5d+8VqNll0x2emQAAAuQxIEAHzm6TfNVlhjB3TdP147TXMqS/SHt+v16t7jGj08oiWzx+qv5ow9ZTJlTGFU40fk6+CJTr1Z26wPXjD6nOMHALd5bf9xHW/v1oiCsD58wZiz+jtGD4/qriUzJM1QY2tMNcc71NjapSOtMR1pielI9s+Hmzq1/1iHuhMp7W1s197G9sF9MlkVxXmaWlaoaeWFmlZWqBljizSldPhJm/qpVFp1LV1653CL3q5r0VuHm7WzvlXH2rrVFksoYGSS5SOGRayqljGFURXmhRRPptUeS6ihpSt7i6mxLTag2SiRYECTRmeSQaOGRxQKBNTUGdf+o+3a3dCqEx1xvbDziF7YeWazqUIBQxUleaocUaDKEQUaPyLf2vQ/0dGtd4+0afvBZu2qb1V38vQJmGDAUFlhVEX5YeWFg5mESzZBkUqnNTwaUklBWONK8jV59DBNHjNc540epvLiTPIknU6rM57UoROd2nOkTe8eadOeI22qPdGhY23dVuWLmfRInOG/W+XIfF1YXqTp5YUqL86XYWQSCMfbu9WdTKkoL6zi/MztoZf2WdWc63ce0Ut7jurGSyt146WVqj3eoXU76vXM9jrFk6f+3aOHRzVvYonmTcwkoyaPHq6O7oRqj3fq9f3H9dK7R7X5wHGdLvzxI/I1a3yxLh5XolnjixUOBnS8Pab3Gtv1Tl2L3qlr0b6j7f3+HR+cMlp3L5mhC8oKz+jfB3g/R44c0dKlS1VXV6fi4mLNmjVLzz33nK6++mpJUk1NjQKBntfKEydO6NZbb1V9fb1GjBihefPm6eWXXz7lIHUMXDQU1O1XTdE3n9ihh/+8T0urJvm+WhIA0JeRTqddP4GxpaVFxcXFam5upg8mAJyD2uMd+tA//0kBQ9r8javP6GTlufqfv9imp944rH+4ZqpWfPSCIf99fsL7o334t8bp3P3kDv1s0wF9ev54/fPfzB7y32cmHvYfbde+o+06cKxdh5u61BlPqiuezH49/QZ9f7riSR1ri6mlK9Hvz0MBQ2VFeSotiiqdllWV0Rkf3EG0ASOTRC8vyiRM8iMhRUOB7C2osSV5Or90uM4fPfy0bcG6Eym9XdeirQdOaEvNCe2qb9WRlq5+n59hSAP9VFScH9a0skJVlOSpMC+kzu6UTnR0q/Z4h2qOdyj2PlUqp5MXDiiV1vtWuryfYZGgRg2PKq202roSOnEW82ZKCsL69vUz9ehrNXr5vWP9PuaSCSX64JTRGjU8qs54UoebOrXjULN2HGp532SR6cLyQs0cV6yJIwvUGkvoUFOn3j6cSW6cqREFYU2vKFJpYVQnOuLatPeYuhMpRYIBPfU/P6hp5SRCBhvvj/bh3/r0OruTWnDvH9XSldDDfztfH72wzOmQAAA2ONP3RypBAMBHnnurXpJ02eSRtiRApExLrKfeOKxtNU22/D4AsFMqldZzbzVIkq6dWW7L7wwEDI0ryde4knxdMWXwK+yaO+Pac6RVu+rbtKu+Re/Ut+qdwy3WxvRftukKBQxNKR2uGRVFmjG2SDMqilRRkq/CvJBSqbRauhI63t6txtZMRUtja0ztsYRCwYDyw0GVFUVVVpSn8uI8lRXlafTwM2tJ9X4ioYDmVJZoTmWJvqCe1ihmpclftupKptI60tql2uOdOniiQ7XHO1V7okMHT3QoYBgaURDR+JH5mjWuRBePK1blyPxTtllKpdJqbMtU7rTHkuroTihgGAoGDAUChgKG1B5L6Hh7XDXHO7TvaJv2NrbrwLEOdSdTfZJYw6MhnT9mmM4vHa4ppcM1edQwjSmMqqQgrHAwoGDAsL6GAkb2a0ChoHFS5c6J9m7trG/VzvpM9cSJjrjSaSkaCmjksIgioYBaOuNqzt6GR0P66uILdUFZoZbMqtCm947p4T/v0/qdRzR59DB9+IIx+uTccZqdbX/5l7riSe041KwtB05Yt2Pt3YqEAho1LKJLJo7Qgskj9dELSzV+REG/f0dzZ1xvHWrWm4eatf1gs9463CzDMDSiIKzKkQWaXlGkC8sLNaOiSGMKo33+N6k51qH/+YuteuNgs55+87CmlU87/f9pAOSs/EhQn5pfqYde2qf/3HSAJAgAoA+SIACQgzq7k3rktR
pFgobGjyzQh6aMVugM+o7/IbtRd80MezbqJGnuhBJJmeHo6XSavtwAPOWNg02qb+nS8GhIl5/vjZZ/xflhzZs4UvMmjrTuS6Uyw7HrmjvV2NqtYMBQNBRQ5chMy6jTzb4oddmB5VMlWIIBQxXF+aooztdlk0f2+5gzFchWzZQVndnsLVMylVZrV1wtnQkZhjRyWEQFkeCgvXeOGBZR1fmjVHX+qAFfaxiGLp8yWpdPGa1kKn1Giaq8cFDzJ43U/EmZf890OtO2ayCzUorzw9bvHagJowp004IJeuPgdr2yt/8qFgDe8bkPTNRDL+3Ti7sbdeBYuyaOGuZ0SAAAlyAJAgA56Cf/vVf3P7/b+v6jF5bqoZvnn3aT5GhbTJsPHJckXXORfSejLhpbpEgwoGPt3ao93qkJo/o/6QkAuWhdtsLuqgtLlRcOOhzN0AkEDI0tydfYknynQ/G0YMBQSUFEJQX2VGuerbOt1DEMQ+GgvYchPnBeJuFTXdukzu6k8iPe/e8U8LvJo4fpw1PHaOPuRv3XqzX62nXTnQ4JAOASZ34EBwDgGs9ur5Mkza4sUSQU0As7j+h3bxw+7TXr32lQKi3NHFd0ypYTQyEaCmp6RaYH9/ZDzbb9XgAYaul0Ws/tyCRBrr3Ivgo7AGduwsgCjS3OUzyZ1pYDJ5wOB8AQW/qBiZKk32w5qPgZziQCAHgfSRAAyDEHjrVrZ32rggFDP1t2qe74WGbY+LefeltNHd2nvM7sWb/IxlZYplHDo5Iy/c8BwCv2Hm3X/mMdigQDunLaGKfDAdAPwzCsahBaYgHed+W0MRo9PKpj7d16cVej0+EAAFyCJAgA5BhzuPkHzhupkoKIbv3QebqgdLiOtXfr/6zb1e81x9u79dK7RyVJi2wa3Nub2fqim9NYADxkQ3Zz5bLJIzUsSpdZwK3MJMgmkiCA54WCAd0wZ6wk6TdbDzocDQDALUiCAECOWZdtvbIo23olEgrou5+8WJL02Os12nOk7aRrfr2lVt3JlC4eV6ypZYX2BZtlDkClJB2Al2zYdUSSqAIBXM4cBP9GbZM6uqlKBbzu/5k3XpK0/p0jp62UBwD4B0kQAMghR1q6tLWmSZJ0Ta+2VpdNHqmF08uUSkv3P9+3GiSdTusXr9VKkm5aMMG2WHuLZJMg3QmSIAC8oaM7oVf3HpckXTmt1OFoAJzO+BH5GleSr0Qqrc37mQsCeN30iiLNqChSdzKlp95nbiIAwB9IggBADvnD25m5HnMnlKi8OK/Pz768aJoMQ3p2e722H+wZQL5p7zHtO9qu4dGQ/mr2WFvjNUVCVIIA8JZN7x1TdzKl8SPydf6YYU6HA+A0DMPQrPHFkqT3Gk+umAXgPWY1yG+2HnI4EgCAG5AEAYAcsqu+VZJ0ebatQ2/Tygt1w5xxkqR7nnpLXfGkJOmRV2skSdfPGetYz3qzHVZ3Mu3I7weAwfanXq2wDMNwOBoA76cgklkDxahKBXzhr2aPVcCQqmubVHu8w+lwAAAOIwkCADmksTUmSSoryuv353cunKqCSFCbD5zQ8v/aqv+zbqee2V4nyblWWBIzQQB4SzqdtoaiXzmVVlhALjCrUmnNCfjDmMKoPnBe5uDY02/WORwNAMBpJEEAIIccbcskQcYMj/b78wmjCvTTm+crGgpo/c4j+uGG95ROS1/84GRdNLbYzlD7CIcyp6TjbDwA8IC9R9t18ESnIsGALp9ycmUeAPeJZpMgsUTS4UgA2OUTszKtgJ9+k7kgAOB3JEEAIIc0ZpMgowv7T4JI0uXnj9ZPls5XJBRQYV5ID950ib75iRl2hdivqNUOiyQIgNz35z1HJUnzJo6wWuwAcLdomEoQwG+unVmuYMDQW4dbtO9ou9PhAAAcxKc2AMghZjusU1WCmD48dYxe+spVioaDKs4P2xHaadEOC4CX/Pe7mSTIBy8Y7XAkAM6UeSCDmSCAf4wcFtEVU0Zr4+5GPf3GYf3Pj13gdEgAAIdQCQIAOaI9llBHd6aFw5jTVIKYSovyXJEAkaSw1YebwegAclsimdIr7x2TJH2IJAiQM6LhoCQqQQC/+cSsCknMBQEAvyMJAgA5wpwHkh8Oalg0twr5qAQB4BVvHGxSayyhkoKwo7OWAAxMhEoQwJcWzShXOGhoV0Or3m1odTocAIBDSIIAQI6wWmGdQRWI20SC2cHoJEEA5DizFdYV549WMGA4HA2AM2XOBGEwOuAvxQVhffiCMZKkp6gGAQDfIgkCADnCrATJySRIiGGkALzhJTMJMoVWWEAuibIWAXzrE7PNlliHlU7TnhcA/IgkCADkCLMSZPTwiMORDJzZDqubShAAOawtltC22iZJzAMBco15IIN2WID/LJxepkgooL2N7XqnjpZYAOBHJEEAIEc0tnVLys1KEGaCAPCC1/YdUzKV1oSRBaocWeB0OAAGIBrKDEYnCQL4T2FeWFdNy7TEevrNww5HAwBwAkkQAMgR1kyQ4XkORzJwPUkQys8B5K5N7x2TJF1+/iiHIwEwUAxGB/ztE7PGSpKefrOOllgA4EMkQQAgR1jtsApzrx1WJMRgdAC5b9PeTBKkiiQIkHOswehxBqMDfvSx6aXKCwdUc7xDb9e1OB0OAMBmJEEAIEdYg9GH5147rEgw04KCYaQAclVzR1xvHc5smlSdRxIEyDUR5pMBvlYQCekjUzMtsZ7bUe9wNAAAu5EEAYAcYbXDysmZIJlKEDYeAOSqV/YdUzotnTdmmEqLcq8tIeB30XB2JkictQjgV9fOLJckrXuLJAgA+A1JEADIAel0Wo3ZSpDROVgJEg4xGB1AbjPngVAFAuSmaIhKEMDvPnphmUIBQ7sb2vReY5vT4QAAbEQSBAByQGssYbWSysVKELMFRTzBEEIAuemVveZQ9NEORwLgbERCzAQB/K44P6zLp2Tex5+jGgQAfIUkCADkALMVVmE0pLxsO4dcEuH0JYAcdqwtpp31rZKkD5w30uFoAJwNKkEASNK1F2VbYjEXBAB8hSQIAOSAXJ4HIklhqxKEjQcAuefVfcclSdPKCjUqB1sSAuhVCZJIKZ2mMhXwq2suKpNhSG8ebNahpk6nwwEA2IQkCADkgKPmPJCcTYIwGB1A7rLmgZzPPBAgV0VDmUradFqKJ0mCAH41enhUl07KVHU+RzUIAPgGSRAAyAFWJUiOnkC2ZoKQBAGQg15+76gk6QMMRQdyltkOS+JQBuB3Vkss5oIAgG+QBAGAHGBWguR6O6xUWkqmOH0JIHccaenSe43tMgzmgQC5zDyQITEcHfC7RTMzSZDX9x+3DpsBALyNJAgA5ICjrd2SpFHDIg5HcnYivU9fMhcEQA7ZtDfTCmt6eZFKCnLzNRiAFAgYViKEShDA38aV5GvW+GKl09Lzbzc4HQ4AwAYkQQAgB3RmTywOi4YcjuTshIO0oACQm17JJkEuZx4IkPOs4ehx1iKA3107k5ZYAOAnJEEAIAfEEpkkSDScmy/b5mB0ibkgAHILQ9EB7zDngnAgA4A5F+TlPUfV3Bl3OBoAwFDLz
[base64 "image/png" notebook output omitted: the existing PNG payload of the cell ends here, and the diff adds (+) a new "image/png" output, a Matplotlib v3.7.1-generated PNG; the raw base64 data spans these lines and is not human-readable]
AH0wvihHY4YOUkckqhd3H3M6HADAOSAJAgAu1hryUCWIj8HoANzj9ao6SdKssgJH4wBgn+R8MqpSAfSBYRi0xAKAFEcSBABcrLMdlt/hSM4fg9EBuElFIgnCUHQgfdAOC8C5umZqPAny111H1RHm8wwApBqSIADgYt5qh5XYeKASBIDDYrFYsh0WQ9GB9JFMgnAgA0AflZcN1tCcoBrbw3p53wmnwwEA9BFJEABwsZZQfPBeVsADSRAqQQC4xKFTrTre1KGAz9CU0jynwwFgE6sqlZkgAPrKNA0toCUWAKQskiAA4GKd7bBSPwnS2Q6LShAAzrJaYU0uzVOmB5LMAM6OVQnCWgTAubBaYj3/Zq2iVLcDQEohCQIALkY7LADofwxFB9IT7bAAnI95Fw5RTtCvo43tybaaAIDUQBIEAFwsWQnigZPKtMMC4BYMRQfSE+2wAJyPoN+nKycOkySt21nrcDQAgL4gCQIALtYa8l4lSJhKEAAOCkWi2nGkXhJD0YF0QyUIgPN1zdQSSdJzbzIXBABSCUkQAHAx60O69aE9lVEJAsANdtc0qi0UVW6mX2OGDHI6HAA2yvAzEwTA+bly4jAFfIb2HWvW3qNNTocDADhLqb+rBgAeZn1I93sgCZIcjE4lCAAHVXSZB2KahrPBALBVcjB6NMZQY5yzhx9+WDNmzFBeXp7y8vI0d+5c/fnPfz7jY377299q0qRJyszM1PTp07VmzRqbokV/y8sMaN6FQyVJ63ZSDQIAqSL1d9UAwMMiiQ/oAQ9s1CUHo1MJAsBBrzMPBEhbVlWqJIWirEdwbkaOHKnvfe972rp1q7Zs2aKrr75aH/rQh7Rz584er9+0aZNuvvlmfeYzn9G2bdt0ww036IYbbtCOHTtsjhz95ZqpxZKk50iCAEDKIAkCAC5mfUD3eSAJkqwEoQUFAAd1rQQBkF4yulTWhliP4BwtXrxY1113ncaPH68JEybou9/9rnJycvTyyy/3eP0Pf/hDXXvttfrKV76iyZMn69vf/rYuuugi/ehHP7I5cvSXBVOKZRjS64fqdaSu1elwAABngSQIALiYlTDwxEwQqxKE9hMAHNLYFtLeY/H+3TNJggBpp+t6ihll6A+RSESPP/64mpubNXfu3B6v2bx5s+bPn9/tvoULF2rz5s12hIgBUJSbqYsvGCyJllgAkCpSf1cNADzMmp/hhUoQH4PRAThs+6F6xWLSiIIsDcsNOh0OAJv5TEPWkqqD9QjOw/bt25WTk6NgMKjPf/7zevrppzVlypQer62pqVFxcXG3+4qLi1VTc+bN8/b2djU0NHS7wT0WTi2RJK3dQRIEAFIBSRAAcDErYdC1h3WqCiQqQSJUggBwyDarFdaoAkfjAOAcv8+aUcZ6BOdu4sSJqqio0D/+8Q994Qtf0Cc/+Um9+eab/fozVq5cqfz8/OStrKysX/98nJ9rp8WTIK8eOKnjTe0ORwMAeC8kQQDAxZKD0b3QDiuRyGEwOgCnWEPRZzEUHUhb1lyQUJj1CM5dRkaGxo0bp9mzZ2vlypWaOXOmfvjDH/Z4bUlJiWpra7vdV1tbq5KSkjP+jBUrVqi+vj55q6qq6rf4cf5GDs7W9BH5isak59+sfe8HAAAclfq7agDgYZ4cjE4lCAAHxGKxzqHoVIIAaatzPUISBP0nGo2qvb3naoC5c+dq/fr13e57/vnne50hYgkGg8rLy+t2g7tY1SC0xAIA9yMJAgAulhyMbqb+y7U1GD1M+wkADqhpaNPRxnb5TEPThuc7HQ4Ah1jVtR1h1iM4NytWrNDGjRt14MABbd++XStWrNCGDRv08Y9/XJK0ZMkSrVixInn97bffrrVr1+q+++7Trl279M1vflNbtmzRsmXLnHoK6CfWXJBN7xxXfWvI4WgAAGeS+rtqAOBhXhqMTjssAE6qqKyTJE0szlVWhs/ZYAA4JpCcCcJ6BOfm6NGjWrJkiSZOnKgPfvCDevXVV7Vu3TotWLBAklRZWanq6urk9fPmzdNjjz2mRx55RDNnztSTTz6pZ555RtOmTXPqKaCfjCvK0fiiHIUiMf1lFy2xAMDN/E4HAADonTUYPeCFweiJTQfaYQFwQsWhOkm0wgLSXYBDGThPP//5z8/4/Q0bNpx234033qgbb7xxgCKCk66dVqK3/7JXa3fU6MPlI50OBwDQCypBAMDFrISB3wuD0U02HQA4x6oEYSg6kN46K0E4lAHg/FktsV7cc0wtHWGHowEA9Cb1d9UAwMOs+Rl+L7TDSswEiVAJAsBmkWhM2w/XS6ISBEh3tMMC0J+mDs9TWWGW2kJRvbj7mNPhAAB6QRIEAFwsHI1/QPd7oB2W9RwYjA7Abm8fbVRLR0SDMny6cFiO0+EAcBDtsAD0J8MwdG2iGmTtzhqHowEA9IYkCAC4mCcHo0fZdABgL6sV1oyRBZ54PQVw7qgEAdDfrp0WT4L85a2jag9HHI4GANATkiAA4FKRaEyxRNFEwEz9l2vrOcRitMQCYK/XGYoOIIGZIAD6W3nZYBXlBtXYHtamvSecDgcA0IPU31UDAI8Kd6mY8FI7LInTlwDstS1RCTKToehA2gv4qQQB0L9M00gOSF+7g5ZYAOBGJEEAwKW6zs7we6ESxNf5HMJUggCwSXN7WHtqGyVJ5VSCAGkvg5kgAAaA1RLr+bdqFeb1BQBcJ/V31QDAo7olQbxQCdKlDz8fDADYZcfhekVjUkleporzMp0OB4DDrIMlHbTDAtCPLh1TqILsgE42d+iVAyedDgcA8C4kQQDApbq1w/LAIN+uw4ipBAFgl4qqOknSrLICR+MA4A7JdlhhDmQA6D8Bn6kFk4slSetoiQUArkMSBABcykoU+ExDhpH6SRDDMJLJnDCnLwHYxBqKPpMkCABJgUR1bdfDJgDQH6yWWOt21irKoS8AcBWSIADgUlavai9UgVj89OEGYLOKxFB0KkEASFKGzxqMzgYlgP51+bihGpThU01DmyoShzAAAO5AEgQAXCqSOD3kpSRIINGHm3ZYAOxwtKFNR+rbZBrSjJH5TocDwAWsAxkdtMMC0M8yAz5dTUssAHAlkiAA4FLWCUW/zzsv1dbGA4PRAdjBmgcyvihXg4J+Z4MB4Aq+RIvRWIwDGQD637VT4y2x1u6s4XUGAFzEOztrAOAxXqwE8dOCAoCNGIoO4N2sOWsUpQIYCFdOHKag39TBEy16q7rR6XAAAAkkQQDApZIzQXzeSYIETIaRArAPQ9EBvJuZTIKQBQHQ/wYF/Xr/hGGS4tUgAAB3IAkCAC4VTlaCeOel2me1w+L4Jc7Dxo0btXjxYg0fPlyGYeiZZ5454/VPPfWUFixYoGHDhikvL09z587VunXr7AkWjolGY3qjql4SlSAAOlkFtixFAAwUqyUWc0EAwD28s7MGAB4TiXqxEiQxGJ12WDgPzc3Nmjlzph566KGzun7jxo1asGCB1qxZo61bt+qqq67S4sWLtW3btgGOFE5651iTGtvDygr4NKE4x+lwALiEaTITBMDAmj+5WH7T0O7aRr1zrMn
pcAAAkpgQCQAulRyM7qmZIAxGx/lbtGiRFi1adNbXP/jgg92+vvfee/X73/9ef/zjH1VeXt7P0cEtrHkg00fkJ+cRAYCRrAQhCQJgYORnBzRv3FBt3HNMf95erWVXj3c6JABIe3wiBACXiniwHZb1XEL0oICDotGoGhsbVVhY2Os17e3tamho6HZDakkORR9V4GgcANzFZDA6ABtcNy3eEmvNdlpiAYAbeGdnDQA8xpOD0akEgQv84Ac/UFNTkz72sY/1es3KlSuVn5+fvJWVldkYIfpDcij6yAJH4wDgLiaVIABscM3UEvlMQ29WN+jA8WanwwGAtEcSBABcypqb4aU2LtZzCTETBA557LHHdM899+iJJ55QUVFRr9etWLFC9fX1yVtVVZWNUeJ8tYUi2lXdKIlKEADdWZUg5EAADKTCQRmaO3aIJGnNjmqHowEAeGdnDQA8Jhz14EyQxHMJR6kEgf0ef/xx3XbbbXriiSc0f/78M14bDAaVl5fX7YbUseNwvcLRmIbmBDU8P9PpcAC4iJFsh0UWBMDAWjQ93hLrz7TEAgDHkQQBAJeyEgWeSoIk2mFFaMQNm/3617/Wrbfeql//+te6/vrrnQ4HAyw5D6SsILnhCQAS7bAA2Gfh1BKZhrT9cL2qTrY4HQ4ApDWSIADgUlaiIOCldlgm7bBw/pqamlRRUaGKigpJ0v79+1VRUaHKykpJ8VZWS5YsSV7/2GOPacmSJbrvvvs0Z84c1dTUqKamRvX19U6EDxt0JkHynQ0EgOswGB2AXYbmBDVnTLwl1p9piQUAjvLOzhoAeIyVKPB5qBKEwejoD1u2bFF5ebnKy8slScuXL1d5ebnuuusuSVJ1dXUyISJJjzzyiMLhsJYuXarS0tLk7fbbb3ckfgw8ayj6rLLBzgYCwHWsZVWMShAANrhuRqkk6VlaYgGAo/xOBwAA6JmVKLASB16QrATh+CXOw5VXXnnGzavVq1d3+3rDhg0DGxBc5URTu6pOtkqSZlAJAuBdkjNBOI8BwAYLpxbrrt/v0OtVdTp0qkUjB2c7HRIApCUqQQDApazB6F6qBPFTCQJggFmtsC4cNkh5mQFngwHgOiaD0QHYqCg3U5eMLpQkrd1BNQgAOIUkCAC4lJUo8HtqJoiVBGHjAcDA6JwHQissAKezzpZESIIAsMn10+MtsdZsZy4IADjFOztrAOAxViVIwEOVIGbiucTExgOAgZFMgowqcDQOAO5kVYKQAwFgl2unlcgwpNcq61Rd3+p0OACQlkiCAIBLdbbD8s5LtbXxQDcsAAMhGo0lkyDlZQWOxgLAnRJLEdphAbBNcV6mLr4gXqFKSywAcIZ3dtYAwGO8OBjdRx9uAANo3/FmNbaFFfSbmliS63Q4AFyocyaIw4EASCuLptESCwCcRBIEAFzKi4PRraKWKDsPAAaAVQUyfUS+Ah6apwSg/5hUggBwwKLpJZKkLQdPqbahzeFoACD98OkQAFzKGh7upY08Tl8CGEgVVackSbNohQWgF9bhkhhJEAA2Ks3P0kWjChSLSet20hILAOzmnZ01APAYqxLE76VKENphARhADEUH8F4May3CfDIANrtuerwl1rNv0BILAOxGEgQAXMqaCeLz0EwQWlAAGChtoYh2VTdKohIEQO84kAHAKddOi7fEeuXASR1rbHc4GgBILyRBAMClrEqQgOmdl2rTZOMBwMDYcbhe4WhMQ3OCGlGQ5XQ4AFyq80CGs3EASD8jB2drZlm8Jdafd1ANAgB28s7OGgB4TDjRp8FTg9ETpy8jtKAA0M+SrbDKCpLtbgDg3ay1CDNBADhh8Yx4S6w/vU4SBADsRBIEAFyqczC6dzbzGEYKYKBsSyRBypkHAuAMDFpzAnDQ9YkkyKsHT6q6vtXhaAAgfZAEAQCXSg5G93nnpZqNBwAD5fUulSAA0JvOmSAOBwIgLZXmZ+nS0YWKxRiQDgB28s7OGgB4jDUY3U87LAA4o+NN7Tp0qlWGIc0Yme90OABczBq1xoEMAE5ZPDNeDfJHkiAAYBuSIADgUiGrEsRDSRCfwWB0AP2vorJOkjRuWI5yMwPOBgPA1TpngjgcCIC0de20UplGvIq18kSL0+EAQFogCQIALhVJzATxeagdlkk7LAADoIJWWADOksGBDAAOG5Yb1LwLh0qS/vjGEYejAYD04J2dNQDwmHA03jMq4KFKEDYeAAyEZBKEoegA3gMHMgC4QbIl1uskQQDADiRBAMClvDgY3WcyjBRA/4pGYwxFB3DWGIwOwA0WTi1RwGdoV02j9h5tdDocAPA87+ysAYDHhCPemwmSPH3JzgOAfrLveJMa28PKCvg0sTjX6XAAuJy1FolRCQLAQQXZGXr/+GGSpD++zoB0ABhoJEEAwKVCkXg7LL/PQ0kQk3ZYAPrXtsRQ9Okj8j1VOQdgYBhUggBwicUzh0uKzwUhMQsAA4tPigDgUpGoFytB2HgA0L+YBwKgL0zmkwFwiflTihX0m9p3rFlvVjc4HQ4AeBpJEABwqVAyCeKdl2raYQHob1YSZObIAkfjAJAaOgejOxsHAOQE/bp6UpEkWmIBwEDzzs4aAHhMJOrBdlicvgTQj1o7ItpVEx8mSiUIgLNhrUVoPQPADZItsV6nJRYADCSSIADgUp2D0b3zUm1tPERY3wPoBzuO1CsSjWlYblDD8zOdDgdACjCSlSAsRgA476qJRRqU4dPhulZtS1S3AgD6n3d21gDAY7w4GN3HYHQA/agiMRR9VllBctgxAJxJsio16nAgACApK8OnBVOKJcWrQQAAA4MkCAC4lDcHo8d/pdQbQH9IDkUvK3A0DgCpg9acANzGaon17BvVyc+AAID+RRIEAFwqZLXD8nnnpdo6qc3iHkB/sJIg5SRBAJylzgMZzsYBAJb3jR+mvEy/jja269UDJ50OBwA8yTs7awDgMV6sBOlsh+VwIABS3tHGNh2ua5VhSNNH5jsdDoAUYVAJAsBlMvymFk0rlST9vuKww9EAgDeRBAEAlwpHvTcTxMrnRMmCADhP1jyQ8UU5ys0MOBsMgJRhMhgdgAt9qDzeEutPb1SrLRRxOBoA8B6SIADgUsl2WKZ3Xqrpww2gvzAPBMC5MBNZEJYiANzksjFDNDw/U41tYa1/66jT4QCA53hnZw0APMaL7bA6kyAOBwIg5XUmQQY7GwiAlEIlCAA3Mk1DH75ohCTpqdcOORwNAHgPSRAAcKlQxIPtsBLvOmw8ADgf0WhMbxyql0QlCIC+MTiQAcClPlw+UpK0Yc8xHW9qdzgaAPAWkiAA4FLhxKfzgM87L9W0wwLQH9451qSm9rCyAj5NKM5xOhwAKYS1CAC3GleUo5llBYpEY/pDxRGnwwEAT/HOzhoAeEgsFku2w/J5sB1WhOOXAM7DtkQrrOkj8+X3UKIYwMCzllXkQAC40UetlljbaIkFAP2JT40A4ELhLkmCgCcHozscCICUZs0DKacVFoA+ohIEgJv904zhCvgM7TjcoN01jU6HAwCe4Z2dNQDwkK6VEl6aCWId2I6x8QDgPF
RU1kliHgiAvjMYjA7AxQoHZeiqiUWSqAYBgP5EEgQAXMgaii55qx2WQTssAOeptSOi3bXxk5GzRhU4GwyAlENVKgC3+8hF8QHpz2w7zOcmAOgnJEEAwIXCkS7tsDzU797HxgOA87T9cL0i0ZiK84Iqzc9yOhwAKcZKglCVCsCtrpo0TAXZAdU2tGvTO8edDgcAPKHPO2sbN27U4sWLNXz4cBmGoWeeeeY9H7NhwwZddNFFCgaDGjdunFavXn0OoQJA+ug6E8RDhSAyaYcF4DxVVJ2SRCssAOfGTLbDcjYOAOhN0O/T4hnDJUlPbqUlFgD0hz4nQZqbmzVz5kw99NBDZ3X9/v37df311+uqq65SRUWFvvjFL+q2227TunXr+hwsAKSLcDTeDivgM5ItpLwg2Q6LJAiAc2QNRZ9VNtjZQACkJIPB6ABSwD/PjrfE+vOOGp1q7nA4GgBIff6+PmDRokVatGjRWV+/atUqjRkzRvfdd58kafLkyXrppZf0wAMPaOHChX398QCQFqx2WH7TO62wpC7tsKLvcSEA9IKh6ADOR7IShFIQAC42Y2S+pg7P084jDfrda4d02/vGOh0SAKS0Ad9d27x5s+bPn9/tvoULF2rz5s29Pqa9vV0NDQ3dbgCQTqx2WH4v9cJS12GkbDwA6LujDW06Ut8m04hvDgBAX3XOBHE4EAA4A8MwdMucUZKkx16ppJ0wAJynAU+C1NTUqLi4uNt9xcXFamhoUGtra4+PWblypfLz85O3srKygQ4TAFwlHImXSvh9HkuCJN51SIIAOBfbEq2wJhTnalCwzwXNAMCBDAAp40OzRmhQhk/7jjXrH/tPOh0OAKQ0V/ZZWbFiherr65O3qqoqp0MCAFtZlSA+j7XD6tx4cDgQACmpcx5IgaNxAEhdBoPRAaSInKBf/2PWCEnSf/+j0uFoACC1DfjuWklJiWpra7vdV1tbq7y8PGVlZfX4mGAwqLy8vG43AEgn1kyQgNcqQZIzQdh5ANB3zAMBcL5Mk0oQAKnj44mWWGt3VOtEU7vD0QBA6hrwJMjcuXO1fv36bvc9//zzmjt37kD/aABIWeGoN9th+WiHBeAcRaIxvXGoTpI0a1SBo7EASF3WuDWWIgBSwbQR+ZoxMl+hSExPbj3kdDgAkLL6nARpampSRUWFKioqJEn79+9XRUWFKivjpXkrVqzQkiVLktd//vOf1759+/TVr35Vu3bt0o9//GM98cQT+tKXvtQ/zwAAPKhzMLq32mEZiUqQCDsPAPpo79EmNXdElJ3h0/iiXKfDAZCimAkCINXccmm8GuTXr1RSUQ8A56jPu2tbtmxReXm5ysvLJUnLly9XeXm57rrrLklSdXV1MiEiSWPGjNGzzz6r559/XjNnztR9992nn/3sZ1q4cGE/PQUA8J6QNRjd9FglSLIdlsOBAEg5FVWnJEnTR+TL57HXRgD26ZwJwkYigNSweOZw5QT9OnCiRZv3nXA6HABISf6+PuDKK69U7AwLxtWrV/f4mG3btvX1RwFA2ookB6N7a6PPOn15pvcRAOhJcig6rbAAnIfOShCHAwGAszQo6NeHy0fov14+qMf+UanLxw11OiQASDne6rMCAB7RORjdWy/T1ulL2mEB6KvXDtZJki4aNdjZQACkNCsJInEoA0DquCUxIH3dzhpV17c6HA0ApB5v7a4BgEckZ4J4bjA6py8B9F1jW0h7jjZKksqpBAFwHroW2bIeAZAqJpfm6bKxhQpHY/rlpoNOhwMAKYckCAC4UNijM0GSLSjYdQDQB28cqlcsJo0cnKWi3EynwwGQwowulSDMBQGQSj5zxVhJ8QHpLR1hh6MBgNRCEgQAXChkVYKY3nqZNhlGCuAcbKuMD0UvpxUWgPPUvRKE9Qj6buXKlbrkkkuUm5uroqIi3XDDDdq9e/cZH7N69WoZhtHtlplJUh99c/WkIl0wJFv1rSH9bushp8MBgJTird01APCISDRRCeKxdlgm7bAAnIPXKuskSRfRCgvAeeo+E8TBQJCyXnzxRS1dulQvv/yynn/+eYVCIV1zzTVqbm4+4+Py8vJUXV2dvB08SEsj9I3PNHTrvNGSpEf/fkARPlQBwFnzOx0AAOB0oYhVCeKxJAjtsAD0USwWoxIEQL8xaYeF87R27dpuX69evVpFRUXaunWr3v/+9/f6OMMwVFJSMtDhweNuvLhMD7zwtvYfb9baHTW6fkap0yEBQEqgEgQAXCiSHIzurZdpn5UEYdMBwFk6eKJFp1pCyvCbmlKa53Q4AFKcwWB09LP6+npJUmFh4Rmva2pq0gUXXKCysjJ96EMf0s6dO+0IDx4zKOjXpxLVID/6617F+FwFAGfFW7trAOARXh2MbiRngjgbB1Lbxo0btXjxYg0fPlyGYeiZZ555z8ds2LBBF110kYLBoMaNG6fVq1cPeJzoH68lqkCmDc9Thp+lK4DzQyUI+lM0GtUXv/hFXX755Zo2bVqv102cOFGPPvqofv/73+tXv/qVotGo5s2bp0OHep/r0N7eroaGhm43QJI+NW+0sjN8equ6QX/dfdTpcAAgJfBJEgBcKNkOy2OVINZMkAibDjgPzc3Nmjlzph566KGzun7//v26/vrrddVVV6miokJf/OIXddttt2ndunUDHCn6w7bkPBBaYQE4f13Pl8SizsUBb1i6dKl27Nihxx9//IzXzZ07V0uWLNGsWbP0gQ98QE899ZSGDRumn/zkJ70+ZuXKlcrPz0/eysrK+jt8pKjBgzL0vy67QJL0o79QDQIAZ8Nbu2sA4BHJdlgeqwSx2mGxUMf5WLRokb7zne/owx/+8Fldv2rVKo0ZM0b33XefJk+erGXLlumf//mf9cADDwxwpOgP26qYBwKg/1AJgv6ybNky/elPf9Jf//pXjRw5sk+PDQQCKi8v1969e3u9ZsWKFaqvr0/eqqqqzjdkeMhtV4xRht/Ua5V1VIMg5b164KSe3naIfQIMKJIgAOBCoag322FZTydCPyzYaPPmzZo/f363+xYuXKjNmzc7FBHOVktHWG9VN0qSykcVOBsMAE/oOhOEylSci1gspmXLlunpp5/WX/7yF40ZM6bPf0YkEtH27dtVWtr7UOtgMKi8vLxuN8BSlJepWxOzQb735118vkLKau2I6NO/eFVf+s3r+n3FEafDgYf5nQ4AAHC6iMfbYbFGh51qampUXFzc7b7i4mI1NDSotbVVWVlZpz2mvb1d7e3tya/pw+2M7YfqFYnGVJKXqeEFp/93AoC+MgxDhiHFYlSC4NwsXbpUjz32mH7/+98rNzdXNTU1kqT8/PzkmmLJkiUaMWKEVq5cKUn61re+pcsuu0zjxo1TXV2dvv/97+vgwYO67bbbHHseSH3/cuU4Pf5qlfbUNul3Ww/pY5fQMg3Oi8ViOtbYriE5QfnedajzWGO7dtc0qrq+VRNLcjVjZIFeeKtWje1hSdK3//SmPjBhmAYPykg+5kRTu365+aBefueEVlw3qcfq8K0HT+lXLx9UOBrTyMFZmlicq9kXDNbIwVkyjLM7W
Lq7plF/33tcraGIJGlcUY5mlRWoOC/zrJ/3jsMNqjhUp/3HmtXUHlJpfpYml+bpminFyb0QOIckCAC4UMij7bC6tqCIxWJnvSAB7LZy5Urdc889ToeR9rZV1UmiCgRA/zINQ5FYTORAcC4efvhhSdKVV17Z7f5f/OIX+tSnPiVJqqyslGl2HmY6deqUPvvZz6qmpkaDBw/W7NmztWnTJk2ZMsWusOFB+dkB/Z+rx+k7z76l+57frUXTS5SbGej3n9PUHlbVyRYdbWxXY1tIDa1hNbSF1NweVkckqnAkpnAkqo7Er+FoLHF/VKFITOFoTIYkn2nINAz5TSP+e9OQz5B8pimfGf++zzTkN035TUN+n/WroYDPTHzPSH4v4DPkM61f44+zfm9dH/DF77fu8/sMGYofyovFYorG4l0Koon3hGgslnh/iH8vGo0l3y+iXe5L/j4WUzQaf46RaEyhxPMPJ3+NKRSNKhKJKRSNX2sYkiElPwtHY7H431PiMaZhyDSU/DuKRmNqD0cVikTVEY7KNAxlBkxlZviU6ffJNAy1hSNq7YioLRRRRySqzIBP2QGfsjJ8CvhMNXeE1dQWVmNbWC0dYWVn+FU4KEODszM0eFBArR0RHW9q14mmDh1ralfAZ6pscJbKCrNVVpgtQ9Kb1Q3ae7RJh+ta1doRUfmoAs0ZM0TTRuRpaE5QG3Yf0/Nv1eof+07oeFOHRg7O0pK5F+jG2WUaPChDT7xapTue2aGOSLzrRdBv6oXlH9DvKw5Lildqnmju0LeffVPf+8gMNbSF9OO/vqP//sdBtYfjj1n636/pz7e/X/nZ8X/nVSdb9LWn3tDf957o8d/uqMJsLZ5Zqg/NGqEJxbmnfb8tFNHT2w7rVy8f1M4jPR96m1icq0XTS7RoWqkmFOd028PoCEf1j/0n9NzOWr3wVq2q69t6/DM+Uj5C//efZyjgsUOuqYYkCAC4UDixMPD7vJUk6JrTiURjnnt+cKeSkhLV1tZ2u6+2tlZ5eXk9VoFI8T7cy5cvT37d0NDAQFIHvHbQmgdS4GwgADzFNKSIqATBuTmbnvUbNmzo9vUDDzzALDIMiE/MvUD/ufmgKk+26J4/vqkf3DjzrB8bjcZ08GSLaurbdKI5vgF+orlDJxKb4dX1rao82aJTLaEBfAZIVbtqGvXrV3qfVXToVKvuXbNL31+3W1OG5+v1xOGm0UOy1R6Oqrq+TXf+fodeevu4JOl7H5muf/vddj312mH96Y1qmYbUForvi8wcma9TLSFVnmzR15/Zrh/dXK6X9h7X//n1NtW1hOQ3DX30opEaV5SjqlMteuNQvXYeqVflyRY99Nd39NBf39GkklwtmFKsC4flyDQNvXbwlJ7dXq1jjfHq/4DP0BXjhmpYblDhaExvHmnQntpG7U7cHnzhbY0dOkiTS/OUneHTwZMteuNQXTJGScrO8GnOmEKNK8pRbmZAVSdb9NS2w3pq22E1tIX00McvUtDvG6D/IngvJEEAwIW8Ohi9awkoLbFgl7lz52rNmjXd7nv++ec1d+7cXh8TDAYVDAYHOjScQSwW61IJwlB0AP0nXpkaYy0CIOUF/T794MaZuumRzXpy6yFdPalI103vfdZMLBbTht3H9NgrlXr1wEnVnWWCoyA7oJK8TOVlBZSXGVB+VkCDgj5l+MxkVYZVaZHh66zkyEhUZMTUWVURiXbeoomvw9F3VVRE4xUmVnVF/NeYwtHOSotu9yUqTpJVGN2u7X5NNBZLVqWYRrwiwzQkn2HEf28q8b34/dbvjcTv44/tfJx1n/X8/VY1i/V3kqhmsSpSJCUrT2KKJe+3HpusQEn8ffkTf3aGP36LxeIVDG2hiFpDEUWi8c33rETlh9801BaKqjVxTXs4ouwMv3Iz/crNDCg7w6fm9rBOtXToZHNIp5o7lJXh09CcDA3NCWpoTlDt4agqT7ao6lSLqk62KByJaVJpriYW52pUYbZM09CWAye15eAp7apuVGsooguHDdJ100v1/gnDNKE4V2t3VOs/N8crLKwEyL8umKClV43Tm9UNWvyjl7Rh9zFJ0tThebrpklFqbAvrP/6yV/Wt8X+XM0bm68vXTNT7xg/VG4fq9dGHN+nZN6q1bkeNwok38Rkj8/Wjmy/SqCHZ3f7NtnSEtf6to/p9xRG9uOeodtU0aldN42n/tofnZ+rTV4zRRy8a2a0NlyTVtXTohbeOau2Oam18+7j2HW/WvuPN3a4ZmhPUgilFWjClWPMuHKrMQPckx8KpJVr62Gt64a2j+kPFEd14MQfrnEISBABcKOTVmSBG1yQIOw84N01NTdq7d2/y6/3796uiokKFhYUaNWqUVqxYocOHD+s///M/JUmf//zn9aMf/Uhf/epX9elPf1p/+ctf9MQTT+jZZ5916ingLByua9Wxxnb5TUPTR+Q7HQ4AD7HWI1GyIAA84NIxhfrCBy7Ujze8oxVPbVdxXqZmX9D9AEkoEtUfKo7okY37tLu2cyM46Dc1cnCWhgwKakhORvw2KKihORkqystU2eBslRVmDUibLaS2hVNLJMXfS+tbQ6clEG66ZJRuumSU9tQ2at2OGpWPGqwrxg+VJE0bka+PXjRST249JEn6cPkISdJt7xurz1wxRgdOtKixLaTpI/KT7admlhXoywsn6nt/3hVvsWZIH5tdpns+NPW0xIMkZWf4tXjmcC2eOVx1LR1au6NG2yrrtP9Es9rDUZWXFWjOmELNn1Lca5uqguwM/fPskfrn2SPV2BbS3/ceV019m5rawxqWG9RFowYnK0t6M39KsT45b7Qe2bhPbxyqJwniIJIgAOBCkWi8pDLgsUoQH0kQ9IMtW7boqquuSn5tta365Cc/qdWrV6u6ulqVlZXJ748ZM0bPPvusvvSlL+mHP/yhRo4cqZ/97GdauHCh7bHj7L1WWSdJmjI8r8cPNgBwrqzlFUsRAF7xxfkTtOmdE6qoqtNNP9msL84fr8vGDlEkGtNfdx/THyoO60hiXsGgDJ9uvnSU/mnmcE0dnsecApwX0zROS4B0NaE4t8d5HF++ZqL+vL1a4WhMi2cOT95vGIbGDB3U45/1+Q9cqOunl8rvM1SQlaGsjLP7jFCQnaH/eeko/c9LR53V9T3JzQzo2mm9V1mdyZTSPEnSrpqe547AHiRBAMCFrMHoPtNbC9Kuc9A5fIlzdeWVV56xH/fq1at7fMy2bdsGMCr0t22ViXkgZQXOBgLAc8wuw2gBwAsy/KZ+ddscfe13b+hPb1TrB8/tOe2aoTlBffqK0fr4nAuUn0VlB5xVkp+pP/yfK9QRjqo4L/OsH1dWmP3eF7nMpNJ4EmhXdaNisVi34eqwD0kQAHAh7w5G73w+EbIgAM5gW6IS5KILmAcCoH9ZyxGSIAC8JCfo13/cXK7Lxw3VU68dUm1Du9pCEc29cIjmTy7WginFVNfCVS4cluN0CLYYOzRHAZ+hxvawDte1auTg1EvkeAFJEABwobBHB6P7ujyfM53kB5De2kIR7TxSL0kqLyMJAqB/mcnBtA4HAgD9zDAM
3XzpKN18Hm1/APSvDL+pC4flxIezVzeSBHGIt/qsAIBHhD07GL3z91SCAOjNziMNCkViGjIoQ2WFWU6HA8BjrMpUDmQAAAA7TGYuiOO8tbsGAB5hJQgCHmuHZXQbjO5gIABcLTkPZNRgeuYC6Hdmsh2Ws3EAAID0MKkkPhfkrepGhyNJXyRBAMCFQomZID6PtcOSOp8Tpy8B9MaaB1I+qsDROAB4k8FgdAAAYKNJiUqQt6gEcQxJEABwIWsmSMD03su0ldeJsPEAoBedlSAFzgYCwJNMBqMDAAAbTS6NV4IcON6s1o6Iw9GkJ+/trgGAByQHo3usHZbU2YebFhQAelJT36Yj9W0yDWnmyAKnwwHgQZ0zQRwOBAAApIVhOUENGZShaEx6+ygtsZxAEgQAXCjs4XZYySQIWRAAPbCqQCaW5GlQ0O9wNAC8yKQdFgAAsJFhGJpUas0FoSWWE0iCAIALJdth+bz3Mk0LCgBnsq2qThKtsAAMHIPB6AAAwGajCrMlSTX17Q5Hkp68t7sGAB7g6UoQk3ZYAHqXnAdSVuBsIAA8i0oQAABgt4zEIddwNOpwJOmJJAgAuFDEmgnixSRIYuMhQhYEwLt0hKN641C9JOmiCwY7HA0Ar7KWVzGSIAAAwCZWp4+OCEkQJ5AEAQAX6hyM7r2Xaau6hY0HAO+2q6ZB7eGo8rMCGjNkkNPhAPCozkoQhwMBAABpw9rfCUdYgDjBe7trAOAB3q4Eif/KxgOAd9tWWSdJmlVWkGydBwD9LTkThMUIAACwSYYvvgAJUQniCJIgAOBCViWIF2eCGLTDAtCL1xLzQC4aRSssAAOHShAAAGA3qx0WSRBnkAQBABfyciWIj2GkAHphVYKUjypwNA4A3mYlQWjNCQAA7BLwJ2aChFl/OIEkCAC4UDgaPxngxXYwne2weOMH0Ol4U7sqT7ZIkmaWFTgbDABPM2jNCQAAbEYliLNIggCAC0Ui3q0EsRI7bDwA6KoiUQUyrihH+VkBZ4MB4GkmVakAAMBm1kwQ69Ar7EUSBABcyMszQdh4ANCTznkgBc4GAsDzzMSnYNYiAADALn4f7bCcRBIEAFzI+lDuN733Mp1sh0UpCIAuOueBMBQdwMDqnAnicCAAACBt0A7LWd7bXQMAD/B0JQjtsAC8SzgSVUVVnSSGogMYeAZVqQAAwGaBRDsskiDOIAkCAC7k6ZkgiY2HCFkQAAm7ahrVGoooN+jXhKJcp8MB4HEmg9EBAIDNMqgEcRRJEABwIU9XgiSeUozTlwASth6MzwMpv2BwsloMAAYK88kAAIDdOtthsf5wAkkQAHAhq0rC7/PeZmDnxoPDgQBwDSsJcvEFzAMBMPA4kAEAAOzmpx2Wo0iCAIALhaPxN0VvVoIk2mGx8QAgwUqCzCYJAsAGBgcyAACAzWiH5SySIADgMtFoLPmh3Gd4LwniM2lBAaBTdX2rDte1yjSkmWUFTocDIA10zgRhLQIAAOwR8NMOy0kkQQDAZbpWSPhN771MJzceOH4JQNJrB+skSZNK8pQT9DsbDIC0QGtOAABgN2smSEeYShAneG93DQBSXKTLJ3KfB2eC0IICQFe0wgJgNysJwkwQAABglwAzQRxFEgQAXKZrEsTvwZkgtMMC0NXWysRQ9NEkQQDYw6AdFgAAsJk1EyTMiVBHkAQBAJfp+obozcHo8V9phwWgLRTRzsP1kqSLRpEEAWCPZDssDmICAACb+K3B6LTDcgRJEABwmW7tsDw4GJ0+3AAsr1fVKRyNqSg3qJGDs5wOB0CaYDA6AACwm9UOq4N2WI4gCQIALhNOHEs0Dcn0ZCUI7bAAxFmtsGZfMDg5LwgABlrnTBCHAwEAAGnDaofFTBBnkAQBAJexKkH8pjdfoq2nRRIEwGsMRQfgAIMDGQAAwGaBRBIkGuveAQT28OYOGwCksHAk/mboxXkgEpUgAOJisZi2kgQB4IDOdljOxgEAANJHwN+5DU81iP1IggCAy1gnAryeBOE9H0hv+48361RLSBl+U1OH5zsdDoA0woEMAABgN3+XPR6SIPYjCQIALhP2eBLEel5sPADpzaoCmTkyXxl+lqQA7GO15oyxFgEAADax2mFJUijCGsRufOIEAJexkgN+jyZBrKfFxgOQ3qwkyEW0wgJgs86ZIA4HAgAA0obPNJKHQqkEsR9JEABwGa/PBDFohwVAnUmQiy8odDgSAOmGdlgAAMAJAV98DdIRZkPEbiRBAMBlrJkgXq0E8bHxAKS9+paQ3j7aJEm6aFSBs8EASDsMRgcAAE6wWmJRCWI/kiAA4DLhaPzN0OfzZhLE6sNNEgRIX69VxatAxgwdpCE5QYejAZBurEoQWnMCAAA7ZSSSIGFOYtiOJAgAuExnJYg3X6KTfbh50wfS1mvWPJBRzAMBYD8jWQnCWgQAANjHTzssx3hzhw0AUph1IsCrM0F8DCMF0p41D2Q2Q9EBOMBkLQIAABxAOyznkAQBAJfx+kwQk9OXQFoLR6KqqKqTJF08miQIAPuxFgEAAE7ISCZBWIPYjSQIALiMVQlinVL0GtNkMDqQznbVNKqlI6LcTL/GDctxOhwAaahzJojDgQAAgLRCJYhzSIIAgMtEEoPR/V4djJ7YeOA9H0hPW7vMAzE9WvEGwN2YTwYAAJwQ8MfXICRB7EcSBABcxnov9OpMEFpQAOmNeSAAnNa5FnE2DgAAkF4CtMNyDEkQAHCZZCWIR5MgVnInRhIESEskQQA4rXMwOmsRAABgn4BJOyynkAQBAJexZoJ4tRLEoB0WkLaq61t1uK5VpiHNKitwOhwAacpaYnEgAwAA2Il2WM4hCQIALhNJJEH8pjdfon2cvgTS1iv7T0qSpo3I16Cg3+FoAKSr5EwQliIAAMBGVjusjjBJELt5c4cNAFJYOOLtShBOXwLpy0qCXDK60OFIAKQz2mEBAAAnWEmQMCcxbEcSBABcprMSxJtJkGQ7LDYegLRjJUEuHUMSBIBzrCUWaxEAAGCnDB8zQZxCEgQAXMbrM0Gs58XBByC9nGzu0NtHmyRRCQLAWWZiLUIOBAAA2Mnvi69BaIdlP5IgAOAykWj8zdCrSRDraUXJggBp5dUD8SqQ8UU5KhyU4XA0ANKZwVoEAAA4IJCsBGENYjeSIADgMl6vBDFN+nAD6YhWWADcwmQwOgAAcECAdliOIQkCAC7j9ZkgbDwA6YkkCAC3SFalciADAADYKCPRDoskiP1IggCAy0SSlSDefIlODiMlCwKkjab2sHYeqZdEEgSA83yGNROEtQgAALAP7bCc480dNgBIYWGPV4Kw8QCkn60HTykak8oKs1San+V0OADSnEFVKgAAcEDATzssp5AEAQCXSVaC+LyZBLE2HiIkQYC08cr+E5KkS0cPcTgSAOjampO1CAAAsE/ApB2WU0iCAIDLeL0ShJkg6A8PPfSQRo8erczMTM2ZM0evvPLKGa9/8MEHNXHiRGVlZamsrExf+tKX1NbWZlO0eHX/KUnSpWMGOxwJAHSdCeJsHAAAIL0wGN05JEEAwGUi0fiboc+
jSZDEez7tsHDOfvOb32j58uW6++679dprr2nmzJlauHChjh492uP1jz32mL72ta/p7rvv1ltvvaWf//zn+s1vfqOvf/3rNkeentpCEVVU1UmSLh1DJQgA55kmrTkBAID9rHZYHWHWIHYjCQIALmNVglizM7wm2Q6L45c4R/fff78++9nP6tZbb9WUKVO0atUqZWdn69FHH+3x+k2bNunyyy/XLbfcotGjR+uaa67RzTff/J7VI+gfr1fVqSMS1bDcoEYPyXY6HACQkawEYS0CAADsY1WChKNUgtiNJAgAuEwk4u2ZIFaFCzkQnIuOjg5t3bpV8+fPT95nmqbmz5+vzZs39/iYefPmaevWrcmkx759+7RmzRpdd911vf6c9vZ2NTQ0dLvh3Lx64KQk6dLRhckkKAA4idacAADACRk+ZoI4xe90AACA7rw/EyT+a5SdB5yD48ePKxKJqLi4uNv9xcXF2rVrV4+PueWWW3T8+HFdccUVisViCofD+vznP3/GdlgrV67UPffc06+xp6t/7E8kQcYUOhwJAMSZVIIAAAAH+H20w3IKlSAA4DLWB3Kf6c2X6M7Tl7zpwx4bNmzQvffeqx//+Md67bXX9NRTT+nZZ5/Vt7/97V4fs2LFCtXX1ydvVVVVNkbsHeFIVFsPWkPRSYIAcAdrLcJSBAAA2InB6M6hEgQAXMb7lSC0oMC5Gzp0qHw+n2pra7vdX1tbq5KSkh4fc+edd+oTn/iEbrvtNknS9OnT1dzcrM997nP6xje+IbOHhGMwGFQwGOz/J5Bmdh5pUEtHRHmZfk0sznU6HACQ1DmfjAMZAADATgHaYTnGm8eMASCFJWeCeDYJEv81wsYDzkFGRoZmz56t9evXJ++LRqNav3695s6d2+NjWlpaTkt0+Hw+SVKMf4cDatM7JyRJl44ZItOjr2kAUk9nOyxn4wAAAOklg0oQx1AJAgAu4/VKECu5w+YzztXy5cv1yU9+UhdffLEuvfRSPfjgg2pubtatt94qSVqyZIlGjBihlStXSpIWL16s+++/X+Xl5ZozZ4727t2rO++8U4sXL04mQzAwNr1zXJJ0+bghDkcCAJ1ozQkAAJzQ2Q6LNYjdSIIAgMtEovETAV6tBEm2oODgA87RTTfdpGPHjumuu+5STU2NZs2apbVr1yaHpVdWVnar/LjjjjtkGIbuuOMOHT58WMOGDdPixYv13e9+16mnkBbawxG9eiA+FH3ehUMdjgYAOllLLA5kAAAAOwX8VII4hSQIALiM1ytBrNOXtMPC+Vi2bJmWLVvW4/c2bNjQ7Wu/36+7775bd999tw2RwbKtsk5toaiG5mRoQnGO0+EAQBIHMgAAgBMCJjNBnMJMEABwmUjU2zNBEtWfnL4EPM6aBzL3wqHJDUcAcAPaYQEAACd0VoKwBrEbSRAAcJlwMgnizZdoazM0wjRSwNM27U3MA7mQeSAA3IXB6AAAwAnWTJCOMJUgdvPmDhsApLCIx9th+ZKnLx0OBMCAaW4Pq6KqThLzQAC4j1UJQlUqAACwU8AXX4OE6clpO5IgAOAyXm+HZRW40IIC8K5X9p9UOBrTyMFZGjUk2+lwAKAbI1kJwloEAADYJ8NHOyynkAQBAJdJVoL4PJoEoQ834Hkv7jkmSXrfeKpAALiPSVUqAABwgNUOK0Q7LNuRBAEAl7HKIj1bCWJtPPCeD3jWht1HJUkfmFDkcCQAcDqqUgEAgBOsw64dETZE7EYSBABcxuszQawkSISNB8CTDhxv1oETLfKbhi4fx1B0AO7TORPE4UCQklauXKlLLrlEubm5Kioq0g033KDdu3e/5+N++9vfatKkScrMzNT06dO1Zs0aG6IFALhJZzsskiB2IwkCAC4TTs4E8eZLtJXbYRgp4E1WK6yLRw9WbmbA4WgA4HQGrTlxHl588UUtXbpUL7/8sp5//nmFQiFdc801am5u7vUxmzZt0s0336zPfOYz2rZtm2644QbdcMMN2rFjh42RAwCcZrXDisY6D8DCHn6nAwAAdOf5ShCTPtyAl9EKC4DbmQxGx3lYu3Ztt69Xr16toqIibd26Ve9///t7fMwPf/hDXXvttfrKV74iSfr2t7+t559/Xj/60Y+0atWqAY8ZAOAOAX/nYddQJCqf6XMwmvTizWPGAJDCwpH4B3LTq0kQqx0WWRDAc9pCEW3ed0KSdOXEYQ5HAwA9YzA6+lN9fb0kqbCwsNdrNm/erPnz53e7b+HChdq8eXOvj2lvb1dDQ0O3GwAgtQV8nfs8tMSyF0kQAHAZr1eCJKo/aYcFeNA/9p9UWyiqkrxMTSrJdTocAOgRrTnRX6LRqL74xS/q8ssv17Rp03q9rqamRsXFxd3uKy4uVk1NTa+PWblypfLz85O3srKyfosbAOCMgNm1EoR1iJ3OKQny0EMPafTo0crMzNScOXP0yiuvnPH6Bx98UBMnTlRWVpbKysr0pS99SW1tbecUMAB4nTUw3OfRJIjBYHTAs9btjG/mXDVpWPL/dQBwG4NKEPSTpUuXaseOHXr88cf7/c9esWKF6uvrk7eqqqp+/xkAAHuZppHc66ESxF59ngnym9/8RsuXL9eqVas0Z84cPfjgg1q4cKF2796toqLTez8/9thj+trXvqZHH31U8+bN0549e/SpT31KhmHo/vvv75cnAQBe4vVKkGQLCt7vAU8JR6JatyOeBFk0rdThaACgdyaD0dEPli1bpj/96U/auHGjRo4cecZrS0pKVFtb2+2+2tpalZSU9PqYYDCoYDDYL7ECANwj4DMUicbUEWZTxE59rgS5//779dnPfla33nqrpkyZolWrVik7O1uPPvpoj9dv2rRJl19+uW655RaNHj1a11xzjW6++eb3rB4BgHQVTmQHvFoJ4mPjAfCkV/af1InmDhVkBzT3wiFOhwMAveocjO5sHEhNsVhMy5Yt09NPP62//OUvGjNmzHs+Zu7cuVq/fn23+55//nnNnTt3oMIEALhUINEjPMxCxFZ9SoJ0dHRo69at3QZ6maap+fPn9zrQa968edq6dWsy6bFv3z6tWbNG11133XmEDQDeFYlYlSDeHNvUufHAGz7gJc9ur5YkLZxSklzYA4AbWZUgzATBuVi6dKl+9atf6bHHHlNubq5qampUU1Oj1tbW5DVLlizRihUrkl/ffvvtWrt2re677z7t2rVL3/zmN7VlyxYtW7bMiacAAHBQRuKzEu2w7NWndljHjx9XJBLpcaDXrl27enzMLbfcouPHj+uKK65QLBZTOBzW5z//eX3961/v9ee0t7ervb09+XVDQ0NfwgSAlGadBvBqJYhp0ocb8JpINJacB3LdDFphAXA3gwMZOA8PP/ywJOnKK6/sdv8vfvELfepTn5IkVVZWyuxyoGnevHl67LHHdMcdd+jrX/+6xo8fr2eeeeaMw9QBAN5kHRijHZa9+jwTpK82bNige++9Vz/+8Y81Z84c7d27V7fffru+/e1v68477+zxMStXrtQ999wz0KEBgCslZ4L4PJoEoR0W4Dmv7D+p403xVljzaIUFwO
WYT4bzcTYVRBs2bDjtvhtvvFE33njjAEQEAEgl1l4PlSD26lMSZOjQofL5fH0a6HXnnXfqE5/4hG677TZJ0vTp09Xc3KzPfe5z+sY3vtHtdIRlxYoVWr58efLrhoYGlZWV9SVUAEhZnq8EsU5fUgoCeMZvt1RJohUWgNTAgQwAAOCUznZYrEPs1KdPqRkZGZo9e3a3gV7RaFTr16/vdaBXS0vLaYkOn88nqfcTFMFgUHl5ed1uAJAurEoQa4C419AOC/CWY43t+uMbRyRJt8wZ5XA0APDerAMZ5EAAAIDdkoPRqQSxVZ/bYS1fvlyf/OQndfHFF+vSSy/Vgw8+qObmZt16662S4gPARowYoZUrV0qSFi9erPvvv1/l5eXJdlh33nmnFi9enEyGAAA6hRO9GbxbCRJ/XhGyIIAn/PqVSoUiMc0qK9DMsgKnwwGA92RQCQIAABwS8MfXIR0kQWzV5yTITTfdpGPHjumuu+5STU2NZs2apbVr1yaHpb97ANgdd9whwzB0xx136PDhwxo2bJgWL16s7373u/33LADAQ6z+1F6dCWJVuJxNP2UA7tYRjupXLx+UJN16+WhngwGAs2QyGB0AADgkQDssR5zTYPRly5Zp2bJlPX7v3QPA/H6/7r77bt19993n8qMAIO14vRLESG48OBsHgPP35x3VOtrYrmG5QS2aVup0OABwVqzWnORAAACA3QKmlQShEsROTK4EABeJRmPJ5IDf9OZLdLIdFjsPQEqrbw3p3jVvSZI+cdkFyvB78zULgPdQCQIAAJxitcMiCWIvPq0CgIt0TQx4tRLEZ9IOC/CClWveUm1Du8YMHaTPvX+s0+EAwFnrnAnicCAAACDtWO2wOsIkQexEEgQAXKTrsHC/R5Mg1tNiMDqQuta/VavHX62SJP3fj85QZsDncEQAcPZMBqMDAACHWEmQMHsitjqnmSAAgIHR9U3Qq5UgnL4EUlc0GtPDL76j+57bLUlaMvcCXTqm0OGoAKBvrCUWORAAAGC3DB8zQZxAEgQAXCQS8X4liJXc4fQl0tk7x5pUdbIl+XW3/xve9b9GrMsd7/7fpuvX7/4/qmvLudO/9+6Iev8ZbeGITjaH9HZto/729nEdrmuVJH3kohH6+nWT3/0HAYDrUQkCAACcEvDF1yG0w7IXSRAAcJFwtPNN0KuVIMlhpJSCII098WqVfrJxn9NhnJPsDJ/uXjxFH7u4LFnZBQCpxGAwOgAAcIg/WQnCOsROJEEAwEWsORmmIc9uLpq0wwJUlJepqcPzkl+/+393Q0aP3+t22bseZPTyrXe/knR9ben9MZ1fBPyGCgcFNTw/U3MvHKI5Y4YoK4MZIABSF2sRAADglADtsBxBEgQAXCSSOJHoN02HIxk4ZqIUJMLpS6Sxz1wxRp+5YozTYQBAWrKSIDHWIgAAwGYZiXZYYZIgtvLuLhsApKBwohzSq62wpK7DSNl4AAAA9ku25mQpAgAAbGZVgnTQDstWJEEAwEWsdlheHYouST5aUAAAAAcZDEYHAAAOsWaCMBjdXiRBAMBFwonMgM/n3SSItfEQIQsCAAAckKwEYS0CAABsZh165TCGvUiCAICLpEUlSJfnRkssAABgt86ZIA4HAgAA0k5yTiqHMWxFEgQAXCQcjZdDpsNMEImWWAAAwH4m7bAAAIBDrBbhEdYhtiIJAgAu0lkJ4t2XZ6sdlsTJBwAAYD+DwegAAMAhiZEgtOW0mXd32QAgBVkzQTycA+lW5cIJTAAAYDeTXtwAAMAh1jokTBLEVh7eZgOA1JMOlSDd22Hxpg8AAOxlrUVYhgAAALslB6OTBLGVd3fZACAFWUkQb88E6VoJ4mAgAAAgLTETBAAAOMVkJogjSIIAgIt0VoKkSxKEN30AAGAvaynC5gMAALCbdeiVGan2IgkCAC4STotKkM7fU/4JAADsZh3IiMWkGIkQAABgIx+zyRxBEgQAXCQSjUrydiVI98HoDgYCAADSUteqVPYfAACAnZLtsNgQsRVJEABwkXDE+5UgRpeNB970AQCA3bpVpZIFAQAANupsh+VwIGmGJAgAuEjnTBBvvzxbb/q0oAAAAHYzqUoFAAAO8Rm0w3KCt3fZACDFWDNBPJ4DSZ7AZOMBAADYrWs7LDYgAACAnUwGozvC49tsAJBa0qUSxGqJFWHjAQAA2KxrOyyWIgAAwE6+xHYPBzHs5e1dNgBIMVYliJdngkhdyj85+QAAAGxGJQgAAHAKg9GdQRIEAFwkmqwE8XYSpLMdFm/6AADAXgaD0QEAgEN8tMNyBEkQAHCRdKkEMZODwBwOBAAApJ3ulSAOBgIAANIOg9GdQRIEAFwkEo1Kkvw+jydBTN70AQCAM7omQWKsRQAAgI0YjO4MkiAA4CKdlSDefnlOtsPiTR8AANjM7NYOy7k4AABA+vExE8QR3t5lA4AUE0mTmSA+k3ZYAADAGQaD0QEAgEN8ic4fEdYgtiIJAgAuki4zQQx6YAIAAAclq1JZiwAAABt1VoI4HEiaIQkCAC5iVYL4DG8nQayNB8o/AQCAE6y5IORAAACAnZKdMdgPsRVJEABwkXAkkQTx+GB0HxsPAADAQSZVqQAAwAHWGoR2WPYiCQIALmK9CXp9JojBmz4AAHCQkWyH5WwcAAAgvVAJ4gySIADgIpFovCmk12eCdA5G500fAADYL1kJwgYEAACwkS+xG8+hUHuRBAEAF7EGo3u9EsR6ejHe9HGOHnroIY0ePVqZmZmaM2eOXnnllTNeX1dXp6VLl6q0tFTBYFATJkzQmjVrbIoWAOA2nWsRZ+MAAADpJdkOi4MYtvI7HQAAoFPEmgliejtH3fmm73AgSEm/+c1vtHz5cq1atUpz5szRgw8+qIULF2r37t0qKio67fqOjg4tWLBARUVFevLJJzVixAgdPHhQBQUF9gcPAHAFZoIAAAAn0A7LGSRBAMBF0qYShHZYOA/333+/PvvZz+rWW2+VJK1atUrPPvusHn30UX3ta1877fpHH31UJ0+e1KZNmxQIBCRJo0ePtjNkAIDLdM4EYS0CAADsw2B0Z3j7qDEApBirHNLrM0Gsp8fJB/RVR0eHtm7dqvnz5yfvM01T8+fP1+bNm3t8zB/+8AfNnTtXS5cuVXFxsaZNm6Z7771XkUik15/T3t6uhoaGbjcAgHd0HshwOBAAAJBWrP0eOmPYiyQIALhI2lSCGGw84NwcP35ckUhExcXF3e4vLi5WTU1Nj4/Zt2+fnnzySUUiEa1Zs0Z33nmn7rvvPn3nO9/p9eesXLlS+fn5yVtZWVm/Pg8AgLOstQjzyQAAgJ18dMZwBEkQAHCRSDR+FMBMmyQIb/oYeNFoVEVFRXrkkUc0e/Zs3XTTTfrGN76hVatW9fqYFStWqL6+PnmrqqqyMWIAwEBLVqWyFAEAADZiMLozmAkCAC6SNpUgiRQ8PTDRV0OHDpXP51NtbW23+2tra1VSUtLjY0pLSxUIBOTz+ZL3TZ48WTU1Nero6FBGRsZpjwkGgwoGg/0bPADANQwOZAAAAAcwGN0ZVIIAg
ItE02QmiI8WFDhHGRkZmj17ttavX5+8LxqNav369Zo7d26Pj7n88su1d+9eRaOdTVf37Nmj0tLSHhMgAADvMxmMDgAAHGAdeg2TBLEVSRAAcJF0qQQxDAaB4dwtX75cP/3pT/XLX/5Sb731lr7whS+oublZt956qyRpyZIlWrFiRfL6L3zhCzp58qRuv/127dmzR88++6zuvfdeLV261KmnAABwWOdMEIcDAQAAacVqf05nDHvRDgsAXMTqCenzeTtHzelLnI+bbrpJx44d01133aWamhrNmjVLa9euTQ5Lr6yslGl2/j9UVlamdevW6Utf+pJmzJihESNG6Pbbb9e//du/OfUUAAAOYz4ZAABwgtUZg3ZY9iIJAgAuki6VIFa7L9ph4VwtW7ZMy5Yt6/F7GzZsOO2+uXPn6uWXXx7gqAAAqcJgMDoAAHAAM1Kd4e2jxgCQYiJpMhOEdlgAAMBJVIIAAAAn+Lq05ORgqH1IggCAi6RNJQgbDwAAwEHWUovNBwAAYKeuh14jlKTahiQIALhIJBovjfB6JYhV/kkSBAAAOKGzEsThQAAAQFoxuyZB2BOxDUkQAHCRcCQ92mHRggIAADgpOROELAgAALCR1RlDkqK0CLcNSRAAcJFImrTDSiZBeMMHAAAOoBIEAAA4wUcliCNIggCAi1hvgD7T2y/P1ns+b/gAAMAJZnIoKWsRAABgH9NgJogTvL3LBgApJl0qQayTD2w8AAAAJyTbYbEUAQAANupaCUJbTvuQBAEAF0mXmSAGLSgAAICDmE8GAACc0HW7h+4Y9iEJAgAuki6VIMl2WGRBAACAA6zOoyRBAACAnQzDSO6JUAliH5IgAOAi4cSkcK9XgtAOCwAAOKlzJojDgQAAgLRj7YmESYLYhiQIALhIshLE5+0kiNUOi0oQAADgBIN2WAAAwCFWEoQ9EfuQBAEAF7FOAVinE73Kx0wQAADgIJPB6AAAwCE+DmPYjiQIALiINRg94PP2y3PnxgNv+AAAwH4MRgcAAE4xqQSxnbd32QAgxVgzQbzeDouNBwAA4CTrQAbzyQAAgN2sdljsidiHJAgAuEgoUQniN7398myatMMCAADOMWjNCQAAHOJLzkl1OJA04u1dNgBIMaHEO2DA85Ug8V8p/QQAAE6gNScAAHAK7bDsRxIEAFwkfWaCxN/waUEBAACcYFIJAgAAHMJgdPt5e5cNAFJMKF1mgtAOCwAAOIgDGQAAwCk+KkFsRxIEAFwiEo3J+hwe8PpMENphAQAABxm0wwIAAA6xtnwirENs4+1dNgBIIaEuE7G8Xgni4/QlAABwULIdFgNJAQCAzZLtsDgYahuSIADgEuEub35enwli0IcbAAA4iMHoAADAKQxGt5+3d9kAIIWEu1aCmB6vBEk8vzBv+AAAwAGdM0EcDgQAAKQdqxKEdlj2IQkCAC4RinS++fk8ngSxKl26tgADAACwS2dVKpsPAADAXtaeD2057UMSBABcIpx49wv4jOQHc6/K8MfffjrCvOMDAAD7dbbDcjYOAACQfqyK1DBZENuQBAEAlwgnKkH8pvdfmjMSg9+pBAEAAE4wqQQBAAAO8ftYh9jN+zttAJAirISA9WboZVSCAAAAJ1lnTmJsPgAAAJtZhzE4F2ofkiAA4BLWkHBrXoaXZSSeYwfv+AAAwAGdM0EcDgQAAKQdayZIhIWIbby/0wYAKcKqivB7fCi6JAWoBAEAAA6iHRbOx8aNG7V48WINHz5chmHomWeeOeP1GzZskGEYp91qamrsCRgA4Co+1iG2IwkCAC5BJQgAAIA9rDMnnMDEuWhubtbMmTP10EMP9elxu3fvVnV1dfJWVFQ0QBECANzMasvJOsQ+fqcDAADEhRMJgUAazQRhMDoAAHCCVQnCAUyci0WLFmnRokV9flxRUZEKCgr6PyAAQEqx2mFRCWIf7x83BoAUEYrE3/z86VQJQjssAADggEQOhM0H2GrWrFkqLS3VggUL9Pe///2M17a3t6uhoaHbDQDgDZ2D0VmH2MX7O20AkCLC0fSZCZLBTBAAAOAgk8HosFFpaalWrVql3/3ud/rd736nsrIyXXnllXrttdd6fczKlSuVn5+fvJWVldkYMQBgIDEY3X60wwIAlwhH0mcmSCA5E4Q3fAAAYD8GksJOEydO1MSJE5Nfz5s3T++8844eeOAB/dd//VePj1mxYoWWL1+e/LqhoYFECAB4BOsQ+5EEAQCXsOZj+NNoJkhHOOJwJAAAIB1ZA0ljbD7AIZdeeqleeumlXr8fDAYVDAZtjAgAYBczWQnicCBpxPvHjQEgRYQTZZAB0/svzVYlSIhKEAAA4ACDdlhwWEVFhUpLS50OAwDgAKsSJMJhDNtQCQIALpFOlSBBZoIAAAAHmQxGx3loamrS3r17k1/v379fFRUVKiws1KhRo7RixQodPnxY//mf/ylJevDBBzVmzBhNnTpVbW1t+tnPfqa//OUveu6555x6CgAAB1kzQaKcxrANSRAAcAlrJog/DWaCJNthUfsJAAAcwGB0nI8tW7boqquuSn5tze745Cc/qdWrV6u6ulqVlZXJ73d0dOhf//VfdfjwYWVnZ2vGjBl64YUXuv0ZAID0YTIY3XYkQQDAJcLReEIgYHq/EiTZDotKEAAA4AArCcJMEJyLK6+88oz/dlavXt3t669+9av66le/OsBRAQBShZ8kiO28f9wYAFJEKFkJ4v0kiFUJ0k4lCAAAcIBBOywAAOAQk5kgtiMJAgAu0TkTxPsvzYFEoqcjHOUEJgAAsB3tsAAAgFOsbR8qQezj/Z02AEgR1kyQdGiHFfT5kr8P86YPAABsxmB0AADgFAaj248kCAC4RMiaCZIGlSBWOywpXg0CAABgp86ZIA4HAgAA0g7tsOzn/Z02AEgR4eRMEO+/NAe6zD0JMRcEAADYzDA4gQkAAJxBJYj9vL/TBgApIhyxKkG83w7L7zOTbSioBAEAAHbrbIflbBwAACD9UAliP5IgAOASocSncL+ZHi/NVkusdpIgAADAZp2D0dl8AAAA9rIqQWiMYZ/02GkDgBSQTpUgUufsE9phAQAAu1mVIDGSIAAAwGbJdlisQ2xDEgQAXCKUnAmSHkmQYKISpIMkCAAAsJlBGwoAAOCQZDss+nLahiQIALhEOBpPBqRLOyyrEoSZIAAAwG6dJzAdDgQAAKSdxHYISRAbpcdOGwCkgHCiEiRd2mFZM0FohwUAAOyW7MUdYfMBAADYy8dsMtuRBAEAl+hsh5UeL80ZPgajAwAAZ/gTSZAwJzABAIDNTJN2WHZLj502AEgBne2w0qMSpHMwOm/6AADAXslKkCiHMQAAgL18zASxHUkQAHAJqy1UIF0qQfzMBAEAAM6gEgQAADjF5yMJYrdz2ml76KGHNHr0aGVmZmrOnDl65ZVXznh9XV2dli5dqtLSUgWDQU2YMEFr1qw5p4ABwKs622GlRyVIBoPRAQCAQ3yJdQibDwAAwG7JShBmgtjG39cH/OY3v9Hy5cu1atUqzZkzRw8++KAWLlyo3bt3q6io
6LTrOzo6tGDBAhUVFenJJ5/UiBEjdPDgQRUUFPRH/ADgGeE0rQRhMDoAALAblSAAAMApVlvOKOsQ2/Q5CXL//ffrs5/9rG699VZJ0qpVq/Tss8/q0Ucf1de+9rXTrn/00Ud18uRJbdq0SYFAQJI0evTo84saADzI+hAeSJdKENphAQAAh9CLGwAAOMVMVoI4HEga6dNx446ODm3dulXz58/v/ANMU/Pnz9fmzZt7fMwf/vAHzZ07V0uXLlVxcbGmTZume++9V5FIpNef097eroaGhm43APA6qyLCb6ZHJYiV7OmgEgQAANjMRyUIAABwCJUg9uvTTtvx48cViURUXFzc7f7i4mLV1NT0+Jh9+/bpySefVCQS0Zo1a3TnnXfqvvvu03e+851ef87KlSuVn5+fvJWVlfUlTABISeFIulWC+CRRCYJz09f5ZJbHH39chmHohhtuGNgAAQCu5k8OJGUdAgAA7GWaVKTabcCPG0ejURUVFemRRx7R7NmzddNNN+kb3/iGVq1a1etjVqxYofr6+uStqqpqoMMEAMeFEm9+VIIAZ2bNJ7v77rv12muvaebMmVq4cKGOHj16xscdOHBAX/7yl/W+973PpkgBAG7lY/MBAAA4hMHo9uvTTtvQoUPl8/lUW1vb7f7a2lqVlJT0+JjS0lJNmDBBPp8ved/kyZNVU1Ojjo6OHh8TDAaVl5fX7QYAXmcNRvenSSVI0BqMTiUI+qjrfLIpU6Zo1apVys7O1qOPPtrrYyKRiD7+8Y/rnnvu0dixY22MFgDgRn6SIAAAwCG+xI487bDs06ckSEZGhmbPnq3169cn74tGo1q/fr3mzp3b42Muv/xy7d27V9EuZcZ79uxRaWmpMjIyzjFsAPCeznZY6VEJkpF4nlSCoC/OZT6ZJH3rW99SUVGRPvOZz9gRJgDA5XyJyltmggAAALuZVILYrs87bcuXL9dPf/pT/fKXv9Rbb72lL3zhC2pubtatt94qSVqyZIlWrFiRvP4LX/iCTp48qdtvv1179uzRs88+q3vvvVdLly7tv2cBAB4QilqD0dOjEsRK9jATBH1xLvPJXnrpJf385z/XT3/607P+Oe3t7WpoaOh2AwB4B5UgAADAKbTltJ+/rw+46aabdOzYMd11112qqanRrFmztHbt2uRmRGVlpcwu/ezLysq0bt06felLX9KMGTM0YsQI3X777fq3f/u3/nsWAOABViWIP10qQfxUgmDgNTY26hOf+IR++tOfaujQoWf9uJUrV+qee+4ZwMgAAE6yNh+s9RcAAIBdrHVIlEoQ2/Q5CSJJy5Yt07Jly3r83oYNG067b+7cuXr55ZfP5UcBQNqwZoIE0mQmSDIJQiUI+qCv88neeecdHThwQIsXL07eZ7Xo9Pv92r17ty688MLTHrdixQotX748+XVDQ4PKysr662kAABxGJQgAAHBKsh0W6xDbnFMSBADQ/zqsShAzPSpBrHZYISpB0Add55PdcMMNkjrnk/V0QGPSpEnavn17t/vuuOMONTY26oc//GGviY1gMKhgMNjv8QMA3CFZCRJlHQIAAOyVrARhGWIbkiAA4BLWh/B0qQQJUgmCc7R8+XJ98pOf1MUXX6xLL71UDz744GnzyUaMGKGVK1cqMzNT06ZN6/b4goICSTrtfgBA+vD7OIEJAACcYVWCcBjDPiRBAMAl0m0mSHIwOpUg6KO+zicDAODdfIn3iTBJEAAAYLNkW06WIbYhCQIALhFK25kgvOuj7/o6n6yr1atX939AAICUwkwQAADglM52WKxD7MIxSQBwCeskYiBNKkEyqAQBAAAO6ZwJwuYDAACwl8lhDNulx04bALhcLBZLvvlZJxO9LpCsBIk4HAkAAEg3PjYfAACAQ3yJmSDRGOsQu5AEAQAXCHVpBJkuM0GsSpAQTTABAIDNkpUgVKQCAACbWSMsOYxhn/TYaQMAlwtHOz+Ap89MkPjz7Aiz+QAAAOzFTBAAAOAUqxIkQiWIbUiCAIALdKsEMdPjpTnD55PUORAeAADALsl2WGw+AAAAmzEY3X7psdMGAC7XtRVD+lSCWDNBSIIAAAB7WYdOqAQBAAB2MzmMYTuSIADgAuHEB3Cfacgw0iMJYiV72kmCAAAAmyVngpAEAQAANksORmc7xDYkQQDABayWUFZ/6nRgVYLQDgsAANjNWnPFYrSiAAAA9vIxm8x2JEEAwAXCiZkgAV/6vCxnJJ5rB0kQAABgM1+X9qNUgwAAADuZDEa3XfrstgGAiyUrQdJkHojETBAAAOCcrtW3nMIEAAB2YjC6/UiCAIALhBKVINaQznRAOywAAOAUn9m1EoS1CAAAsI/VBIRKEPukz24bALiY9eE7I40qQazWX6FIjNMPAADAVl0PnlAJAgAA7ORLrEMiEdYgdiEJAgAukKwESaeZIP7O5xriBCYAALBRl0IQZoIAAABb+ZgJYrv02W0DABcLp+NMkC4JH+aCAAAAOxmGkZwLQiUIAACwk1WQyhrEPiRBAMAFrBOIgXSaCUISBAAAOMiaC0IlCAAAsFNyMDqVILZJn902AHCxUBpWgphm5wnMEH0wAQCAzawNCPpxAwAAOyXbYXEQwzYkQQDABcJpOBNE6hyOTiUIAACwW2clCOsQAABgHzNZCSLFqAaxRXrttgGAS1kfvgNm+lSCSJ3D0TsiEYcjAQAA6YaZIAAAwAlWJYgUT4Rg4JEEAQAXCCUrQdI0CRLmXR8AANjLl5jFxkwQAABgJ7PLAVgOY9iDJAgAuECyEiTN2mFZw9E7IrShAAAA9qISBAAAOMFndq0EYR1ih/TabQMAl0pWgqRpO6wQSRAAAGAzH0kQAADggK7tsFiH2IMkCAC4gJUESL/B6PE3fgajAwAAu1ltSGmHBQAA7GR22fqJUAlii/TabQMAlwonKkECaTsThCQIAACwF5UgAADACd0Go7MOsQVJEABwAasShJkgAAAA9rDakFqz2QAAAOzgYzC67dJrtw0AXMpqw+A30+tl2Ur6UAkCAADs5kusu9h8AAAAdjIMQ1YxCOsQe6TXbhsAuFQ4WQlCOywAAAA7dFaCsPkAAADsZa1DmAliD5IgAOACocRMEH+6JUESlSAh2mEBAACbJWeCRNh8AAAA9jINZpPZiSQIALiA1Ys63dphJStBSIIAAACbUQkCAACcYh3GYDSZPdJrtw0AXCqcOIFIOywAAAB7JCtBSIIAAACb+QzaYdmJJAgAuEBnO6z0ellODkanEgQAANjMl6wEYR0CAADsZXIYw1bptdsGAC5lffgOmFSCAAAA2IFKEAAA4JRkOywqQWxBEgQAXCBdK0EYjA4AAJzCTBAAAOAUBqPbK7122wDApcKJJICfmSAAAAC28JnxdQibDwAAwG7WGVjWIfYgCQIALmBVQgTM9HpZtipBSIIAAAC7UQkCAACcYg1Gpx2WPdJrtw0AXCoUtdphpVclCIPRAQCAU3yJdVeUJAgAALAZg9HtRRIEAFygsx1Wer0sl+QHJUl7apscjgQAAKQbKkEAAIBTGIxur/TabQMAlwo
nBqNnpFklyLwLh0qSKqrq1NAWcjgaAACQTnzJE5hUpAIAAHv5koPRHQ4kTZAEAQAXSLbDSrOZIGWF2Ro9JFuRaEz/2HfS6XAAAEAaoRIEAAA4hXZY9kqv3TYAcKnOdljpVQkiSZePi1eDvPT2MYcjAQAA6cSXOHwSibD5AAAA7NVZCcI6xA4kQQDABax2WIE0mwkiSe8bH0+C/G3vcYcjAQAA6YRKEAAA4JRkW05mgtgi/XbbAMCFQole1NaH8XTy/7d37/FR1dfex79zycwkkIRLTMIllZuKCAIGiUEptU9eTb2gtlp51AOUKl7pxTxtFW3BamuoVQ+nFk1FKdqqoPVSj3CwNpVj0VgqEKWCKHJVSAAvSUjIbeb3/JHMhADRJOzZezLzeb86ryZ79k5WfgKzstes38ofliG3S9q2v1Z7Pj/kdDgAACBBeNiGAgAAOCQyGJ08xBYUQQAgBiRyJ0h6SpLGDO4jSVrzAd0gAADAHnSCAAAApzATxF5epwMAAEhNCTwTRJImj8jQ27s/19znN2rZv3YpOz0gSXKpdT0OW5bwhy5Xz1urXj6PFlx6utNhAAAASR5P+OZDyOFIAABAognf/mE7LHtQBAGAGBB+B6LXnXidIJL0nQmD9fK7Ffpg30Gt3/W50+FETZ+UJIogAADEiPBAUjpBAACA3dgOy14UQQAgBjS3doIkJWgnyIn9e+mVoin66LM6/XPbp6prbFY4DQi/KcIc9u6Inpoi+L0ep0MAAACtvGxDAQAAHOJ2MRjdThRBACAGNLXOBPEm4EyQww3um6LBuSlOhwEAABKAp7UDl04QAABgNw9vxrBVYt9tA4AYEZkJ4k7MThAAAAC7hWexBYPcfAAAAPaKbIdFJ4gtKIIAQAwIvwMxKcE7QQAAAOwSvvlAJwgAALBbZDuskMOBJAjutgFADGhK8JkgAAAAdvPyDkwAAOAQBqPbiyIIAMSAtiII/ywDAADYgU4QdNdrr72mqVOnauDAgXK5XHrhhRe+9JrVq1frjDPOkN/v14gRI7R06dKoxwkAiF0MRrcXd9sAwGHBkFF9U0sRJMXncTgaAACAxOCNDCRlHwp0TW1trcaOHatFixZ16vzt27frggsu0Lnnnqvy8nL96Ec/0jXXXKOXX345ypECAGJV+D2wDEa3h9fpAAAg0R1qCkY+7uXnn2UAAAA7eNwtdx+aGYyOLjrvvPN03nnndfr8kpISDR06VPfdd58k6dRTT9WaNWv0n//5nyosLIxWmACAGMZgdHvRCQIADqtraJYkuV2S38s/ywAAAHZo6wTh5gOiq6ysTAUFBe2OFRYWqqysrMNrGhoaVF1d3e4BAIgf4e2weDOGPbjbBgAOq21s6QTp5fPK5WIwOgAAgB2YCQK7VFRUKCsrq92xrKwsVVdX69ChQ8e8pri4WOnp6ZFHTk6OHaECAGzipRPEVhRBAMBhta2dICl+5oEAAADYxeuhEwSxa+7cuaqqqoo8du/e7XRIAAALuelItRWbzwOAw8IzQVJ8/JMMAABgl7ZOEAajI7qys7NVWVnZ7lhlZaXS0tKUnJx8zGv8fr/8fr8d4QEAHOBp3QkkSCeILegEAQCHRTpBfHSCAAAA2IWZILBLfn6+SktL2x175ZVXlJ+f71BEAACnRQajk4fYgiIIADis7rCZIAA6Z9GiRRoyZIgCgYDy8vK0du3aDs9dvHixJk+erL59+6pv374qKCj4wvMBAInB4275dZiZIOiqgwcPqry8XOXl5ZKk7du3q7y8XLt27ZLUspXVjBkzIudff/312rZtm37605/qvffe04MPPqinn35aN998sxPhAwBiQNt2WA4HkiAoggCAw5gJAnTN8uXLVVRUpPnz52v9+vUaO3asCgsLtW/fvmOev3r1al1xxRV69dVXVVZWppycHH3jG9/Qxx9/bHPkAIBY4mn9bZhOEHTVW2+9pfHjx2v8+PGSpKKiIo0fP17z5s2TJO3duzdSEJGkoUOHasWKFXrllVc0duxY3XfffXrkkUdUWFjoSPwAAOexHZa9eNsxADisbSYIRRCgM+6//37Nnj1bs2bNkiSVlJRoxYoVWrJkiW699dajzn/iiSfaff7II4/o2WefVWlpabt3aQIAEkukEyTIzQd0zde+9jWZL7hptXTp0mNes2HDhihGBQDoSdgOy150ggCAw2obGIwOdFZjY6PWrVungoKCyDG3262CggKVlZV16mvU1dWpqalJ/fr16/CchoYGVVdXt3sAAOILM0EAAIBT3HSC2IoiCAA4rK6xZTusXnSCAF/qwIEDCgaDysrKanc8KytLFRUVnfoat9xyiwYOHNiukHKk4uJipaenRx45OTnHFTcAIPaE34HZHGIzbgAAYK/wtpx0gtiDIggAOCzSCeKnEwSItgULFmjZsmV6/vnnFQgEOjxv7ty5qqqqijx2795tY5QAADvQCQIAAJziJg+xFXfcAMBhh5paB6Mn0QkCfJmMjAx5PB5VVla2O15ZWans7OwvvPbee+/VggUL9Le//U2nn376F57r9/vl9/uPO14AQOwKd4KwDQUAALAbg9HtRScIADiMThCg83w+n3Jzc1VaWho5FgqFVFpaqvz8/A6vu+eee3TXXXdp1apVmjBhgh2hAgBinLd1MHqQwegAAMBmDEa3F3fcAMBhzAQBuqaoqEgzZ87UhAkTNHHiRC1cuFC1tbWaNWuWJGnGjBkaNGiQiouLJUm//vWvNW/ePD355JMaMmRIZHZI79691bt3b8d+DgCAs9pmgnDzAQAA2Cs8GJ08xB4UQQDAYXSCAF0zbdo07d+/X/PmzVNFRYXGjRunVatWRYal79q1S253W7PrQw89pMbGRl122WXtvs78+fN1xx132Bk6ACCGeD3sxQ0AAJwRnk0WYjssW3DHDQAcRicI0HVz5szRnDlzjvnc6tWr232+Y8eO6AcEAOhx6AQBAABOYTC6vZgJAgAOq2ts6QRJpggCAABgGy83HwAAgEM8kTzE4UASBEUQAHBYuAjSy0dzHgAAgF3aOkG4+wAAAOzlcbEdlp0oggCAw2rD22H56QQBAACwi7d1fhSdIAAAwG7h2WRNtILYgiIIADisLjwYnU4QAAAA2zATBAAAOCU5qeWNsPVNFEHskJB33H5b+oFWbtzb7pirtQVJklxHnO9yfcnnh11x9HNf/MVcHT/V7jmvx620gFd9U3wantlbp2SnKn9YfwWSeOc40JM1BUNqbK36pzATBAAAwDbhIogxUihkIgNKAQAAoi08F7a+KehwJIkhIYsge6vq9V5FjdNhHLcUn0fnjszU984eqtwT+zodDoBuCM8DkegEAQAAsJPnsKJHc8jIRxEEAADYxO9tKYIcoghii4S843b1OUN0wZgBkc+N2tqfj5xFc2RjtDniBNPhJ+2/7pFfu6vfpzEYUk19s/ZVN2jr/oNat+NT7amq14p39mrFO3s1+aQM3X7BqRqZnSYAPUdd6zyQJI9LPi87FAIAANjFe1jRg7kgAADATuFOkEONFEHskJBFkBGZqRqRme
p0GMfFGKN3PqrSE//cqWfXf6x/fHBAF/52ja6fMlxzvj6CbbKAHqKWeSAAAACOaN8JEpLE71AAAMAebTNBKILYgbcd91Aul0tjc/ronsvG6tX/9zV987RsNYeMfvfqVn3rwTf04f6DTocIoBPCFX/mgQAAANiLThAAAOCUcBGE7bDsQREkDnylf4pKpueq5D/OUP9ePm3eW62pD6zRi2/vcTo0AF+itnU7LIogAAAA9vJQBAEAAA5J9rXclqcTxB4UQeLIN0cP0P/8cLLyh/VXXWNQP3hqg+59eYtCJPRAzArPBOnlZzssAAAAO7lcrkghhCIIAACwE4PR7UURJM5kpgX0p2vydN2UYZKk3726VTc8sU61Dc0ORwbgWNpmgtAJAgAAYLdwEaSZIggAALBReDB6fVOIN7DbgCJIHPK4XZp73qm67ztj5fO49fK7lbqspEwffVbndGgAjhDpBGEwOgAAgO28dIIAAAAHhGeCSFJDc8jBSBIDRZA4dmnuYD117VnK6N0yJ+RbD76hf39c5XRYAA5T1zoYPZlOEAAAANvRCQIAAJwQOKwIwpZY0UcRJM7lnthXf5lzjkZmp2p/TYOm/b5M//hgv9NhAWgVLoLQCQIAAGC/tk4Q3oEJAADs43G75PMyHN0uFEESwKA+yXr6+nzlD+uv2sagZv3hX3pu/UdOhwVAiszrSfHTCQIAAGA3j7vlV2I6QQAAgN3CW2LRCRJ9FEESRFogSUu/d6YuGjtQzSGjoqff1oOrt8oYkn3ASXSCAAAAOCfcCdIc5PciAABgr0BSy635Q40UQaKNIkgC8Xs9WjhtnK796jBJ0j2rtmj+i+8yBBBwUHgwOjNBAAAA7OdhMDoAAHBIuBOE7bCijyJIgnG7Xbrt/FP18wtHyeWSHi/bqRufWMdfNsAhtZFOEIogAAAAdmMwOgAAcEqA7bBsQxEkQV19zlA9cMV4+TxuvfxupWY8ulZVh5qcDgtIOHWRmSBshwUAAGA3L50gAADAIeFdQdgOK/oogiSwC08fqMe+N1Gpfq/W7vhUl5eUqaKq3umwgIRSy0wQAAAAx7R1goQcjgQAACSayHZYzeQh0UYRJMHlD++v5dflKzPVry2VNfr2g69r674ap8MCEka42p/CdlgAAAC2YyYIAABwSng7rHo6QaKOIgg0amCanr1hkoZl9NKeqnpdVlKmdTs/czosICHUtg5GpwgCAABgP6+HmSAAAMAZycwEsQ1FEEiScvql6M83TNLYnD76vK5JVz3ypko3VzodFhD36hpat8NiJggAAIDtPO6WX4mDQYogAADAXgxGtw9FEET06+XTU7Pz9LVTTlB9U0jX/nGdnn5rt9NhAXGNThAAAADnRAajG4ogAADAXsm+llvzDEaPvm4VQRYtWqQhQ4YoEAgoLy9Pa9eu7dR1y5Ytk8vl0iWXXNKdbwsbpPi8Wjxjgi49Y7CCIaOf/vkdLXp1qwy/FACWM8aorpFOEAAAAKcwEwQAADglMhidTpCo63IRZPny5SoqKtL8+fO1fv16jR07VoWFhdq3b98XXrdjxw79+Mc/1uTJk7sdLOyR5HHr3u+cruunDJck/eblLbrjxXf5xQCwWENzKPL3KplOEAAAANuFO0GYCQIAAOxGEcQ+XS6C3H///Zo9e7ZmzZqlUaNGqaSkRCkpKVqyZEmH1wSDQV111VX6xS9+oWHDhh1XwLCHy+XSreeN1LwLR0mSHivbqR88tUENzfylBKyyv6ZBkuTzupVKJwgAAIDt2jpBQg5HAgAAEo2fmSC26VIRpLGxUevWrVNBQUHbF3C7VVBQoLKysg6vu/POO5WZmamrr766+5HCEd87Z6h+e8V4JXlcWrFxr7675F+qrm9yOiwgLuyrqZckZaX55XK5HI4GAAAg8UQ6QRiMDgAAbJYcKYLwZoxo61IR5MCBAwoGg8rKymp3PCsrSxUVFce8Zs2aNXr00Ue1ePHiTn+fhoYGVVdXt3vAOReNHag/fHeievk8Ktv2iab9/k3tq653Oiygx6usbukEyUoNOBwJAABAYvK4W34lZutfAABgt/DW6AxGj75uDUbvrJqaGk2fPl2LFy9WRkZGp68rLi5Wenp65JGTkxPFKNEZ55yUoeXX5Sujt0+b91br2w+9oW37DzodFtCjVVaHO0EoggAAADiBmSAAAMApzASxT5eKIBkZGfJ4PKqsrGx3vLKyUtnZ2Ued/+GHH2rHjh2aOnWqvF6vvF6vHn/8cb344ovyer368MMPj/l95s6dq6qqqshj9+7dXQkTUTJ6ULqevWGSTuyfoo8+O6TLSsr09u7PnQ4L6LHCnSCZaX6HIwEAAEhMHk94JghFEAAAYK8AM0Fs06UiiM/nU25urkpLSyPHQqGQSktLlZ+ff9T5I0eO1MaNG1VeXh55XHTRRTr33HNVXl7eYYeH3+9XWlpauwdiw4n9e+nZGyZpzKB0fVrbqCsWv6n/fX+/02EBPdI+OkEAAAAc5XHRCQIAAJwR3g6LTpDo6/J2WEVFRVq8eLEee+wxbd68WTfccINqa2s1a9YsSdKMGTM0d+5cSVIgENDo0aPbPfr06aPU1FSNHj1aPp/P2p8Gtsjo7ddT156lySdlqK4xqKuX/kvPb/jI6bCAHqfysMHoAAAAsF94O6xgiIGkAADAXgFvy615OkGiz9vVC6ZNm6b9+/dr3rx5qqio0Lhx47Rq1arIsPRdu3bJ7Y7qqBHEgN5+rx6deaZ+/MzbevHtPbp5+dvaX9Oga7863OnQgB6DwegAAADO8jATBAAAOCTSCcJg9KjrchFEkubMmaM5c+Yc87nVq1d/4bVLly7tzrdEDPJ53Vo4bZxOSPXr0TXbdffK97SvukG3nX+q3K2/TADoWHgweibbYQEAADjCG54JEqQIAgAA7JXMTBDb0LKB4+J2u/TzC0fptvNHSpIeWbNdNz9drsZm2smBL1LX2Kya+mZJbIcFAADgFDpBAACAUxiMbh+KILDEtV8drvsvHyuv26W/lO/R1Y/9Swcbmp0OC4hZ+1q3wkrxedTb362mPAAAABwnb+tWzkGKIAAAwGZtg9FDMoZcJJoogsAy3z5jsB6ZOUEpPo/+8cEBXfHwmzpwsMHpsICYFN4KKystIJeL7eMAAACcQCcIAABwSrgTRJIa2FUnqiiCwFJfOyVTT84+S/16+bTx4ypd+tAb2vlJrdNhATGnsqalQJiZylZYAAAATvG2FkFCvPsSAADYLOBtuzV/iOHoUUURBJYbl9NHf74+X4P7JmvnJ3W69KE39O+Pq5wOC4gp+w7rBAEAAIAzIp0gDEYHAAA283rc8nlabs8zFyS6KIIgKoad0FvP3TBJpw5I04GDjfq/D7+p17cecDosIGa0bYdFJwgAAIBTwp0gwRBbUAAAAPsFkiiC2IEiCKImMy2g5dedpfxh/XWwoVnf/cNavfj2HqfDAmJCZetgdDpBAAAAnONpHYzOTBAAAOCE8HB0tsOKLoogiKq0QJKWfu9MXTBmgJqCRj94aoN+/78fyrDnLhJcuBMkkyIIA
ACAY7yecCcIv58AAAD7JbcOR6+nEySqKIIg6vxej357xXh9d9IQSVLx/7ynn73wbzUHaTlH4trXOhg9i8HoAAAAjonMBKEIAgAAHBCIFEG4TxpNFEFgC4/bpTsuOk3zLhwll0t64p+7dM3jb+lgQ7PToQG2M8YcNhOEThAAAACntM0EoQgCAADsFy6CMBMkuiiCwFbfO2eoSv4jV4Ekt1Zv2a/vlJRpb9Uhp8MCbHWwoVl1rXs9ZjIYHQAAwDFuF50gAADAOckUQWxBEQS2KzwtW8uvzVdGb582763WJYte17t7qpwOC7DNpj3VkqSM3n6l+LwORwMAAJC4fN6WX4kPNdKhDgAA7BcejF7PYPSooggCR4zN6aPnbzxbIzJ7q7K6QZeXlOlvmyqdDguwxT8+OCBJOmdEf4cjAQAASGxD+veSJG3bX+twJAAAIBHRCWIPiiBwTE6/FD17wyRNGt5ftY1Bzf7jW1r06lYZQys64ttrH+yXJE0+6QSHIwEAAEhsJ2X1liTt+KRWDc3cfAAAAPbyJ7V2pVIEiSqKIHBUenKSHvveRP3HWV+RMdJvXt6i7z+1QXW0oyNOfVrbqI0ft2z/NvmkDIejAQAASGyZqX6lBbwKGWn7AbpBAACAvcKdIPUUQaKKIggcl+Rx65eXjNGvvjVaXrdLL72zV5c9VKaPPqtzOjTAcq9vPSBjpJHZqcpMCzgdDgAAQEJzuVw6KStVkvRB5UGHowEAAImG7bDsQREEMeOqvBP15Oyz1L+XT5v2VuuC367RK8wJQZz5R2QrLLpAAAAAYsFJmS1bYn2wjyIIAACwF4PR7UERBDFl4tB++sucszU2p4+qDjVp9uNv6ZcvbVJjc8jp0IDjZoyJDEVnHggAAEBsGBEuglTWOBwJAABINAE6QWxBEQQxZ3DfFD1zXb6uPmeoJOmRNdv1nd+XafenbI+Fnu2f2z/V3qp6+bxuTRzaz+lwAAAAILVth0UnCAAAsFnbdli8ATyaKIIgJvm8bv38wlF6eHqu0gJevb37c31z4Wv645s7FQoZp8MDuqyusVm3PPuOJOnb4wdFKv0AAABwVng7rB0HaulABwAAtop0grAdVlRRBEFM+8Zp2Vrxg8k6c0hf1TYG9fMX/q0rH3lTuz6hKwQ9yz2rtmjnJ3UakB7QbRec6nQ4AAAAaDUgPaBePo+aQ0Y7P6l1OhwAAJBA0pOTJEkf7KtRkDd+Rw1FEMS8nH4pWn5tvuZPHaXkJI/e3PapChe+psWvbeOdWugRHi/boaVv7JAk/frS05UWSHI2ICAOLFq0SEOGDFEgEFBeXp7Wrl37hec/88wzGjlypAKBgMaMGaOVK1faFCkAINa5XC6NYEssAADggCmnnKA+KUna+UmdXnpnj9PhxC2v0wEAneF2uzTr7KH6PyOzdMuz76hs2yf61crN+tM/d+r/feMUnT86W15P92t6xhg1BY2CIaOmUEjNQaPmYEjNIaPmYMuxYMioKdj6XPic1mMtz7Ucj3zcer3LJbnkktsluVs+kdvV8rmr9WOXyyWXwh+r9bm2Y253y9dwHXZcarleanlORx3TER+0nXfkOS5X20nGGIWMZGTU+j+FjJFp/dhEPm79f9P6vFo+llqvD3+NL3F47Ec918FTrnbPta6L2tbG1bq2LrlkZBQKHRZ7u5+hLc6W/yYuedwued0uJXnc8nndMsboUFNQ9U1B1TUGFQwZJfs8Sk7yKNnnkc/jVkNzSPVNQdU3hdQcCumEVL8yUwP6tLZRL72zR4+X7ZQkzZ48VF89mYHowPFavny5ioqKVFJSory8PC1cuFCFhYXasmWLMjMzjzr/jTfe0BVXXKHi4mJdeOGFevLJJ3XJJZdo/fr1Gj16tAM/AQAg1pyc2Vtv7/5c71fW6PwxA5wOBwAAJIjefq+uOWeo7v3r+/rd37dq6ukD5XZ3fK8M3eMyxsR8n011dbXS09NVVVWltLQ0p8OBw0Iho2fW7da9f31f+2saJEkD0wP6zoQc5Q3rp1Oz0xRI8ihkjD452KiK6nrt+KRWOz+p1Y5P6vTRZ4d0sL5JdY1B1TY0q64xqGbazRBlPyk8RTd+bXi7ghNwvBL19TEvL09nnnmmfve730mSQqGQcnJy9P3vf1+33nrrUedPmzZNtbW1eumllyLHzjrrLI0bN04lJSWd+p6JutYAkCgefu1D3b3yPY3MTtW8C0dpzOB0SUe8yab1zTc9nZWz6Xh9tA9rDQDxq7q+Secs+Luq65t1x9RRuuD0gUpL/vLehS96Y7HU8ZuLneZ1uyy7P9bZ10c6QdDjuN0uTTvzK7rw9IFa/I9t+mPZTu2pqtd/lX4glVr4fVyS1+2W19PSGeD1uCMdAl5PS8dAUvj51ufCz3vcLiW1niOprTPCmEjXRMi0dSSEwscP66AImcM6MA7rzgi17gAWLtscWccMfxruwmj7/PBzTPtjh51zeFeF+4hf9sKdKpEOFrVc4D6iEyPc8dK+Y+PoGNsdO9Z/hGOe1/Zzte/uOPznaTvmbg0iHIv7GL/Iulwt/z2CIRN5NAVDke3WDu/8cLtcqm8K6lBTUIcag2oMhuT3uhVI8ijg9cjtlvbVNGhfdYP69fJpcN9kzcgfom+Ozj7WTwigixobG7Vu3TrNnTs3csztdqugoEBlZWXHvKasrExFRUXtjhUWFuqFF17o8Ps0NDSooaEh8nl1dfXxBQ4AiGkThvSTyyW9V1GjKx/5p9PhRI3H7dKHd5/vdBgAAOAwaYEkzTp7qP6r9APd8d+bdMd/b3I6pKjadGehUnz2liUogqDH6uX36kcFJ+v6KcO14p29Wv3+fr2141PtraqPnBNIcuuEVL+G9O+lE/unaEj/Xsrpl6L05CT18nmV4vcopXVLo0ghw9NS3KD1DABiz4EDBxQMBpWVldXueFZWlt57771jXlNRUXHM8ysqKjr8PsXFxfrFL35x/AEDAHqEM77SVyu+P1lP/HOn/lK+Rwcbmp0OCQAAJJBrJg/Vlooabfy4SnuqDh3zDcToPoog6PECSR5dmjtYl+YOliQ1NofUGAzJJSnF52H7IQBAl82dO7dd90h1dbVycnIcjAgAEG2jBqbpV98ao7suHq3GYEtHcLtZeDq6CxsAAMAKqYEklUzPldRyb7OhOfiF539ZRhLLKUuyhVtzdhZFEMQdn7dloDUAIP5kZGTI4/GosrKy3fHKykplZx9727ns7OwunS9Jfr9ffr//+AMGAPQ4brdLAbf9v5wDAABI3NuMBlYTAAD0GD6fT7m5uSotbRsCFQqFVFpaqvz8/GNek5+f3+58SXrllVc6PB8AAAAAAMQPOkEAAECPUlRUpJkzZ2rChAmaOHGiFi5cqNraWs2aNUuSNGPGDA0aNEjFxcWSpB/+8IeaMmWK7rvvPl1wwQVatmyZ3nrrLT388MNO/hgAAAAAAMAGFEEAAECPMm3aNO3fv1/z5s1TRUWFxo0bp1WrVkWGn+/atUtud1uz66RJk/Tkk0/qZz/7mW677TaddNJJeuGFFzR69GinfgQAAAAA
AGATl+kBk92qq6uVnp6uqqoqpaWlOR0OAAAxgddH+7DWAAAcjddH+7DWAAAcrbOvj8wEAQAAAAAAAAAAcYkiCAAAAAAAQBcsWrRIQ4YMUSAQUF5entauXdvhuUuXLpXL5Wr3CAQCNkYLAEBiowgCAAAAAADQScuXL1dRUZHmz5+v9evXa+zYsSosLNS+ffs6vCYtLU179+6NPHbu3GljxAAAJDaKIAAAAAAAAJ10//33a/bs2Zo1a5ZGjRqlkpISpaSkaMmSJR1e43K5lJ2dHXlkZWXZGDEAAImNIggAAAAAAEAnNDY2at26dSooKIgcc7vdKigoUFlZWYfXHTx4UCeeeKJycnJ08cUX691337UjXAAAIIogAAAAAAAAnXLgwAEFg8GjOjmysrJUUVFxzGtOOeUULVmyRH/5y1/0pz/9SaFQSJMmTdJHH33U4fdpaGhQdXV1uwcAAOgeiiAAAAAAAABRkp+frxkzZmjcuHGaMmWKnnvuOZ1wwgn6/e9/3+E1xcXFSk9PjzxycnJsjBgAgPhCEQQAAAAAAKATMjIy5PF4VFlZ2e54ZWWlsrOzO/U1kpKSNH78eG3durXDc+bOnauqqqrIY/fu3ccVNwAAiYwiCAAAAAAAQCf4fD7l5uaqtLQ0ciwUCqm0tFT5+fmd+hrBYFAbN27UgAEDOjzH7/crLS2t3QMAAHSP1+kAAAAAAAAAeoqioiLNnDlTEyZM0MSJE7Vw4ULV1tZq1qxZkqQZM2Zo0KBBKi4uliTdeeedOuusszRixAh9/vnn+s1vfqOdO3fqmmuucfLHAAAgYVAEAQAAAAAA6KRp06Zp//79mjdvnioqKjRu3DitWrUqMix9165dcrvbNt747LPPNHv2bFVUVKhv377Kzc3VG2+8oVGjRjn1IwAAkFBcxhjjdBBfprq6Wunp6aqqqqIFFACAVrw+2oe1BgDgaLw+2oe1BgDgaJ19fWQmCAAAAAAAAAAAiEsUQQAAAAAAAAAAQFyiCAIAAAAAAAAAAOISRRAAAAAAAAAAABCXKIIAAAAAAAAAAIC4RBEEAAAAAAAAAADEJa/TAXSGMUaSVF1d7XAkAADEjvDrYvh1EtFDLgIAwNHIRexDLgIAwNE6m4v0iCJITU2NJCknJ8fhSAAAiD01NTVKT093Ooy4Ri4CAEDHyEWij1wEAICOfVku4jI94C0boVBIe/bsUWpqqlwu13F9rerqauXk5Gj37t1KS0uzKMLExXpah7W0DmtpLdbTOlavpTFGNTU1GjhwoNxudriMJnKR2MV6Woe1tA5raS3W0zrkIj2XlbmIxN8rK7GW1mEtrcNaWov1tI5TuUiP6ARxu90aPHiwpV8zLS2NP7QWYj2tw1pah7W0FutpHSvXkndd2oNcJPaxntZhLa3DWlqL9bQOuUjPE41cROLvlZVYS+uwltZhLa3FelrH7lyEt2oAAAAAAAAAAIC4RBEEAAAAAAAAAADEpYQrgvj9fs2fP19+v9/pUOIC62kd1tI6rKW1WE/rsJaQ+HNgNdbTOqyldVhLa7Ge1mEtEcafBeuwltZhLa3DWlqL9bSOU2vZIwajAwAAAAAAAAAAdFXCdYIAAAAAAAAAAIDEQBEEAAAAAAAAAADEJYogAAAAAAAAAAAgLlEEAQAAAAAAAAAAcSkuiyCLFi3SkCFDFAgElJeXp7Vr137h+c8884xGjhypQCCgMWPGaOXKlTZF2jN0ZT0XL16syZMnq2/fvurbt68KCgq+dP0TSVf/bIYtW7ZMLpdLl1xySXQD7EG6upaff/65brrpJg0YMEB+v18nn3wyf9dbdXUtFy5cqFNOOUXJycnKycnRzTffrPr6epuijV2vvfaapk6dqoEDB8rlcumFF1740mtWr16tM844Q36/XyNGjNDSpUujHifsQS5iLXIR65CLWIdcxFrkI9YgH0EYuYi1yEWsQy5iHXIRa5GLWCNmcxETZ5YtW2Z8Pp9ZsmSJeffdd83s2bNNnz59TGVl5THPf/31143H4zH33HOP2bRpk/nZz35mkpKSzMaNG22OPDZ1dT2vvPJKs2jRIrNhwwazefNm893vftekp6ebjz76yObIY09X1zJs+/btZtCgQWby5Mnm4osvtifYGNfVtWxoaDATJkww559/vlmzZo3Zvn27Wb16tSkvL7c58tjT1bV84oknjN/vN0888YTZvn27efnll82AAQPMzTffbHPksWflypXm9ttvN88995yRZJ5//vkvPH/btm0mJSXFFBUVmU2bNpkHHnjAeDwes2rVKnsCRtSQi1iLXMQ65CLWIRexFvmIdchHYAy5iNXIRaxDLmIdchFrkYtYJ1ZzkbgrgkycONHcdNNNkc+DwaAZOHCgKS4uPub5l19+ubngggvaHcvLyzPXXXddVOPsKbq6nkdqbm42qamp5rHHHotWiD1Gd9ayubnZTJo0yTzyyCNm5syZvNi36upaPvTQQ2bYsGGmsbHRrhB7jK6u5U033WS+/vWvtztWVFRkzj777KjG2dN05oX+pz/9qTnttNPaHZs2bZopLCyMYmSwA7mItchFrEMuYh1yEWuRj0QH+UjiIhexFrmIdchFrEMuYi1ykeiIpVwkrrbDamxs1Lp161RQUBA55na7VVBQoLKysmNeU1ZW1u58SSosLOzw/ETSnfU8Ul1dnZqamtSvX79ohdkjdHct77zzTmVmZurqq6+2I8weoTtr+eKLLyo/P1833XSTsrKyNHr0aN19990KBoN2hR2TurOWkyZN0rp16yJtodu2bdPKlSt1/vnn2xJzPOH1Jz6Ri1iLXMQ65CLWIRexFvmIs3gNij/kItYiF7EOuYh1yEWsRS7iLLteg7yWfjWHHThwQMFgUFlZWe2OZ2Vl6b333jvmNRUVFcc8v6KiImpx9hTdWc8j3XLLLRo4cOBRf5gTTXfWcs2aNXr00UdVXl5uQ4Q9R3fWctu2bfr73/+uq666SitXrtTWrVt14403qqmpSfPnz7cj7JjUnbW88sordeDAAZ1zzjkyxqi5uVnXX3+9brvtNjtCjisdvf5UV1fr0KFDSk5OdigyHA9yEWuRi1iHXMQ65CLWIh9xFvlI/CEXsRa5iHXIRaxDLmItchFn2ZWLxFUnCGLLggULtGzZMj3//PMKBAJOh9Oj1NTUaPr06Vq8eLEyMjKcDqfHC4VCyszM1MMPP6zc3FxNmzZNt99+u0pKSpwOrcdZvXq17r77bj344INav369nnvuOa1YsUJ33XWX06EBwFHIRbqPXMRa5CLWIh8B0FOQi3QfuYi1yEWsRS7S88RVJ0hGRoY8Ho8qKyvbHa+srFR2dvYxr8nOzu7S+YmkO+sZdu+992rBggX629/+ptNPPz2aYfYIXV3LDz/8UDt27NDUqVMjx0KhkCTJ6/Vqy5YtGj58eHSDjlHd+XM5YMAAJSUlyePxRI6deuqpqqioUGNjo3w+X1RjjlXdWcuf//znmj59uq655hpJ0pgxY1RbW6trr71Wt99+u9xuauud1dHrT1paGu+67MHIRaxFLmIdchHrkIt
Yi3zEWeQj8YdcxFrkItYhF7EOuYi1yEWcZVcuElf/RXw+n3Jzc1VaWho5FgqFVFpaqvz8/GNek5+f3+58SXrllVc6PD+RdGc9Jemee+7RXXfdpVWrVmnChAl2hBrzurqWI0eO1MaNG1VeXh55XHTRRTr33HNVXl6unJwcO8OPKd35c3n22Wdr69atkYRJkt5//30NGDAgoV/ou7OWdXV1R72Yh5OolplX6Cxef+ITuYi1yEWsQy5iHXIRa5GPOIvXoPhDLmItchHrkItYh1zEWuQizrLtNcjSMesxYNmyZcbv95ulS5eaTZs2mWuvvdb06dPHVFRUGGOMmT59urn11lsj57/++uvG6/Wae++912zevNnMnz/fJCUlmY0bNzr1I8SUrq7nggULjM/nM3/+85/N3r17I4+amhqnfoSY0dW1PNLMmTPNxRdfbFO0sa2ra7lr1y6Tmppq5syZY7Zs2WJeeuklk5mZaX75y1869SPEjK6u5fz5801qaqp56qmnzLZt28xf//pXM3z4cHP55Zc79SPEjJqaGrNhwwazYcMGI8ncf//9ZsOGDWbnzp3GGGNuvfVWM3369Mj527ZtMykpKeYnP/mJ2bx5s1m0aJHxeDxm1apVTv0IsAi5iLXIRaxDLmIdchFrkY9Yh3wExpCLWI1cxDrkItYhF7EWuYh1YjUXibsiiDHGPPDAA+YrX/mK8fl8ZuLEiebNN9+MPDdlyhQzc+bMduc//fTT5uSTTzY+n8+cdtppZsWKFTZHHNu6sp4nnniikXTUY/78+fYHHoO6+mfzcLzYt9fVtXzjjTdMXl6e8fv9ZtiwYeZXv/qVaW5utjnq2NSVtWxqajJ33HGHGT58uAkEAiYnJ8fceOON5rPPPrM/8Bjz6quvHvPfv/D6zZw500yZMuWoa8aNG2d8Pp8ZNmyY+cMf/mB73IgOchFrkYtYh1zEOuQi1iIfsQb5CMLIRaxFLmIdchHrkItYi1zEGrGai7iMoUcHAAAAAAAAAADEn7iaCQIAAAAAAAAAABBGEQQAAAAAAAAAAMQliiAAAAAAAAAAACAuUQQBAAAAAAAAAABxiSIIAAAAAAAAAACISxRBAAAAAAAAAABAXKIIAgAAAAAAAAAA4hJFEAAAAAAAAAAAEJcoggAAAAAAAAAAgLhEEQQAAAAAAAAAAMQliiAAAAAAAAAAACAuUQQBAAAAAAAAAABx6f8DrcPgncA5D/wAAAAASUVORK5CYII=",
"text/plain": [
"
"
]
@@ -215,7 +372,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.6 (main, Nov 14 2022, 16:10:14) [GCC 11.3.0]"
+ "version": "3.9.16"
},
"orig_nbformat": 4,
"vscode": {
diff --git a/MindFlow/applications/cfd/lax/lax_tube_CN.ipynb b/MindFlow/applications/cfd/lax/lax_tube_CN.ipynb
index 9d7a5be1dd3dff72b3e09a48f52aedc6a86f6a30..12feb2edbea2cad1fd40d562fed9bcd5b002c083 100644
--- a/MindFlow/applications/cfd/lax/lax_tube_CN.ipynb
+++ b/MindFlow/applications/cfd/lax/lax_tube_CN.ipynb
@@ -55,9 +55,7 @@
"from mindflow.cfd.runtime import RunTime\n",
"from mindflow.cfd.simulator import Simulator\n",
"\n",
- "from src.ic import lax_ic_1d\n",
- "\n",
- "context.set_context(device_target=\"GPU\", device_id=3)"
+ "from src.ic import lax_ic_1d"
]
},
{
@@ -65,9 +63,17 @@
"cell_type": "markdown",
"metadata": {},
"source": [
-    "## Define the Simulator and RunTime\n",
+    "## Set up the MindSpore Runtime Environment\n",
+    "\n",
+    "Before running the program, the context should be configured. The commonly used parameters of `context.set_context` are as follows:\n",
+    "\n",
+    "`mode` sets the execution mode: 'GRAPH' means static graph mode and 'PYNATIVE' means dynamic graph mode; see the [MindSpore website](https://www.mindspore.cn/docs/zh-CN/r2.0/design/dynamic_graph_and_static_graph.html) for details. The default is 'GRAPH';\n",
"\n",
-    "The mesh, material, simulation time, boundary conditions and numerical methods are configured in the file [numeric.yaml](https://gitee.com/mindspore/mindscience/blob/master/MindFlow/applications/cfd/lax/numeric.yaml)."
+    "`save_graphs` indicates whether to save the computational graph; the default is 'False';\n",
+    "\n",
+    "`device_target` specifies the hardware platform and can be set to 'Ascend' or 'GPU'; the default is 'GPU';\n",
+    "\n",
+    "`device_id` specifies the index of the device card to use and can be set according to the actual hardware; the default is 0."
]
},
{
@@ -75,8 +81,47 @@
"execution_count": 2,
"metadata": {},
"outputs": [],
+ "source": [
+ "context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target=\"GPU\", device_id=0)"
+ ]
+ },
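The markdown cell above describes the `context.set_context` parameters and the new code cell applies them for a GPU. Purely as an illustrative sketch (not part of the notebook or of this diff), the same call configured for an Ascend card might look as follows; the `from mindspore import context` import path and the device index 1 are assumptions.

    from mindspore import context  # import path assumed to match the notebook's earlier import cell

    # Static graph mode on Ascend card 1; save_graphs kept at its default of False.
    context.set_context(mode=context.GRAPH_MODE, save_graphs=False,
                        device_target="Ascend", device_id=1)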
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "## Read the Configuration File\n",
+    "\n",
+    "This program provides several options for configuring the mesh, material, simulation time, boundary conditions and numerical methods. These settings are made in the [numeric.yaml](./numeric.yaml) file, and users can choose different numerical methods according to their needs. The program supports the following numerical methods: the WENO3, WENO5 and WENO7 reconstruction schemes, and the Rusanov, HLLC and Roe Riemann solvers.\n",
+    "\n",
+    "Besides setting them directly in the configuration file, the numerical methods can also be chosen by modifying the code below. In the following code block, the second and third lines are where the numerical methods are set; to specify the numerical methods in the configuration file instead, comment out these two lines."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
"source": [
"config = load_yaml_config('numeric.yaml')\n",
+ "config[\"space_solver\"][\"convective_flux\"][\"reconstructor\"] = \"WENO5\"\n",
+ "config[\"space_solver\"][\"convective_flux\"][\"riemann_computer\"] = \"Roe\""
+ ]
+ },
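The two added lines above override the reconstruction scheme and Riemann solver loaded from numeric.yaml. As a minimal sketch (not part of the notebook), the same override pattern could sweep every combination the text lists as supported; the `load_yaml_config` import path (`mindflow.utils`) and the exact accepted spellings of the scheme and solver names are assumptions to be checked against numeric.yaml.

    from mindflow.utils import load_yaml_config  # import path assumed
    from mindflow.cfd.runtime import RunTime
    from mindflow.cfd.simulator import Simulator

    # Build one Simulator/RunTime pair per reconstruction scheme and Riemann solver.
    for reconstructor in ("WENO3", "WENO5", "WENO7"):
        for riemann in ("Rusanov", "HLLC", "Roe"):  # spellings assumed to match numeric.yaml
            config = load_yaml_config("numeric.yaml")
            config["space_solver"]["convective_flux"]["reconstructor"] = reconstructor
            config["space_solver"]["convective_flux"]["riemann_computer"] = riemann
            simulator = Simulator(config)
            runtime = RunTime(config["runtime"], simulator.mesh_info, simulator.material)
            print(f"configured {reconstructor} + {riemann}")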
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "## Define the Simulator and RunTime"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
"simulator = Simulator(config)\n",
"runtime = RunTime(config['runtime'], simulator.mesh_info, simulator.material)"
]
@@ -93,7 +138,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -114,7 +159,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 6,
"metadata": {},
"outputs": [
{
@@ -122,40 +167,152 @@
"output_type": "stream",
"text": [
"current time = 0.000000, time step = 0.001117\n",
- "current time = 0.001117, time step = 0.001107\n",
- "current time = 0.002224, time step = 0.001072\n",
- "current time = 0.003296, time step = 0.001035\n",
- "current time = 0.004332, time step = 0.001016\n",
- "current time = 0.005348, time step = 0.001008\n",
- "current time = 0.006356, time step = 0.000991\n",
- "current time = 0.007347, time step = 0.000976\n",
- "current time = 0.008324, time step = 0.000966\n",
- "current time = 0.009290, time step = 0.000960\n",
- "current time = 0.010250, time step = 0.000957\n",
- "current time = 0.011207, time step = 0.000954\n",
- "current time = 0.012161, time step = 0.000953\n",
- "current time = 0.013113, time step = 0.000952\n",
- "current time = 0.014066, time step = 0.000952\n",
- "current time = 0.015017, time step = 0.000951\n",
- "current time = 0.015969, time step = 0.000951\n",
- "current time = 0.016920, time step = 0.000952\n",
- "current time = 0.017872, time step = 0.000951\n",
- "current time = 0.018823, time step = 0.000951\n",
- "current time = 0.019775, time step = 0.000952\n",
- "current time = 0.020726, time step = 0.000953\n",
- "current time = 0.021679, time step = 0.000952\n",
- "current time = 0.022631, time step = 0.000952\n",
- "current time = 0.023583, time step = 0.000952\n",
- "current time = 0.024535, time step = 0.000952\n",
- "current time = 0.025488, time step = 0.000952\n",
- "current time = 0.026440, time step = 0.000952\n",
- "current time = 0.027392, time step = 0.000953\n",
- "current time = 0.028345, time step = 0.000952\n",
- "...\n",
- "current time = 0.136983, time step = 0.000953\n",
- "current time = 0.137936, time step = 0.000953\n",
- "current time = 0.138889, time step = 0.000953\n",
- "current time = 0.139843, time step = 0.000953\n"
+ "current time = 0.001117, time step = 0.001031\n",
+ "current time = 0.002148, time step = 0.001000\n",
+ "current time = 0.003148, time step = 0.000972\n",
+ "current time = 0.004120, time step = 0.000962\n",
+ "current time = 0.005082, time step = 0.000954\n",
+ "current time = 0.006036, time step = 0.000944\n",
+ "current time = 0.006980, time step = 0.000955\n",
+ "current time = 0.007935, time step = 0.000953\n",
+ "current time = 0.008888, time step = 0.000950\n",
+ "current time = 0.009838, time step = 0.000947\n",
+ "current time = 0.010785, time step = 0.000943\n",
+ "current time = 0.011728, time step = 0.000942\n",
+ "current time = 0.012670, time step = 0.000943\n",
+ "current time = 0.013613, time step = 0.000947\n",
+ "current time = 0.014560, time step = 0.000952\n",
+ "current time = 0.015512, time step = 0.000950\n",
+ "current time = 0.016462, time step = 0.000950\n",
+ "current time = 0.017412, time step = 0.000949\n",
+ "current time = 0.018361, time step = 0.000949\n",
+ "current time = 0.019310, time step = 0.000949\n",
+ "current time = 0.020258, time step = 0.000950\n",
+ "current time = 0.021208, time step = 0.000951\n",
+ "current time = 0.022159, time step = 0.000953\n",
+ "current time = 0.023112, time step = 0.000952\n",
+ "current time = 0.024064, time step = 0.000951\n",
+ "current time = 0.025014, time step = 0.000950\n",
+ "current time = 0.025965, time step = 0.000951\n",
+ "current time = 0.026915, time step = 0.000952\n",
+ "current time = 0.027867, time step = 0.000953\n",
+ "current time = 0.028820, time step = 0.000953\n",
+ "current time = 0.029774, time step = 0.000953\n",
+ "current time = 0.030727, time step = 0.000953\n",
+ "current time = 0.031680, time step = 0.000952\n",
+ "current time = 0.032632, time step = 0.000952\n",
+ "current time = 0.033584, time step = 0.000953\n",
+ "current time = 0.034538, time step = 0.000954\n",
+ "current time = 0.035492, time step = 0.000954\n",
+ "current time = 0.036446, time step = 0.000954\n",
+ "current time = 0.037399, time step = 0.000954\n",
+ "current time = 0.038353, time step = 0.000953\n",
+ "current time = 0.039307, time step = 0.000954\n",
+ "current time = 0.040260, time step = 0.000954\n",
+ "current time = 0.041215, time step = 0.000954\n",
+ "current time = 0.042169, time step = 0.000954\n",
+ "current time = 0.043122, time step = 0.000954\n",
+ "current time = 0.044076, time step = 0.000954\n",
+ "current time = 0.045030, time step = 0.000954\n",
+ "current time = 0.045984, time step = 0.000954\n",
+ "current time = 0.046938, time step = 0.000954\n",
+ "current time = 0.047892, time step = 0.000954\n",
+ "current time = 0.048847, time step = 0.000954\n",
+ "current time = 0.049801, time step = 0.000954\n",
+ "current time = 0.050755, time step = 0.000954\n",
+ "current time = 0.051709, time step = 0.000954\n",
+ "current time = 0.052663, time step = 0.000954\n",
+ "current time = 0.053618, time step = 0.000954\n",
+ "current time = 0.054572, time step = 0.000954\n",
+ "current time = 0.055526, time step = 0.000954\n",
+ "current time = 0.056481, time step = 0.000954\n",
+ "current time = 0.057435, time step = 0.000955\n",
+ "current time = 0.058390, time step = 0.000954\n",
+ "current time = 0.059344, time step = 0.000954\n",
+ "current time = 0.060298, time step = 0.000955\n",
+ "current time = 0.061253, time step = 0.000954\n",
+ "current time = 0.062207, time step = 0.000954\n",
+ "current time = 0.063162, time step = 0.000955\n",
+ "current time = 0.064116, time step = 0.000955\n",
+ "current time = 0.065071, time step = 0.000954\n",
+ "current time = 0.066025, time step = 0.000955\n",
+ "current time = 0.066980, time step = 0.000955\n",
+ "current time = 0.067934, time step = 0.000954\n",
+ "current time = 0.068889, time step = 0.000955\n",
+ "current time = 0.069844, time step = 0.000955\n",
+ "current time = 0.070798, time step = 0.000955\n",
+ "current time = 0.071753, time step = 0.000955\n",
+ "current time = 0.072707, time step = 0.000955\n",
+ "current time = 0.073662, time step = 0.000955\n",
+ "current time = 0.074617, time step = 0.000955\n",
+ "current time = 0.075571, time step = 0.000955\n",
+ "current time = 0.076526, time step = 0.000955\n",
+ "current time = 0.077480, time step = 0.000955\n",
+ "current time = 0.078435, time step = 0.000955\n",
+ "current time = 0.079390, time step = 0.000955\n",
+ "current time = 0.080344, time step = 0.000955\n",
+ "current time = 0.081299, time step = 0.000955\n",
+ "current time = 0.082254, time step = 0.000955\n",
+ "current time = 0.083209, time step = 0.000955\n",
+ "current time = 0.084163, time step = 0.000955\n",
+ "current time = 0.085118, time step = 0.000955\n",
+ "current time = 0.086073, time step = 0.000955\n",
+ "current time = 0.087027, time step = 0.000955\n",
+ "current time = 0.087982, time step = 0.000955\n",
+ "current time = 0.088937, time step = 0.000955\n",
+ "current time = 0.089892, time step = 0.000955\n",
+ "current time = 0.090846, time step = 0.000955\n",
+ "current time = 0.091801, time step = 0.000955\n",
+ "current time = 0.092756, time step = 0.000955\n",
+ "current time = 0.093711, time step = 0.000955\n",
+ "current time = 0.094665, time step = 0.000955\n",
+ "current time = 0.095620, time step = 0.000955\n",
+ "current time = 0.096575, time step = 0.000955\n",
+ "current time = 0.097530, time step = 0.000955\n",
+ "current time = 0.098485, time step = 0.000955\n",
+ "current time = 0.099439, time step = 0.000955\n",
+ "current time = 0.100394, time step = 0.000955\n",
+ "current time = 0.101349, time step = 0.000955\n",
+ "current time = 0.102304, time step = 0.000955\n",
+ "current time = 0.103259, time step = 0.000955\n",
+ "current time = 0.104214, time step = 0.000955\n",
+ "current time = 0.105168, time step = 0.000955\n",
+ "current time = 0.106123, time step = 0.000955\n",
+ "current time = 0.107078, time step = 0.000955\n",
+ "current time = 0.108033, time step = 0.000955\n",
+ "current time = 0.108988, time step = 0.000955\n",
+ "current time = 0.109943, time step = 0.000955\n",
+ "current time = 0.110898, time step = 0.000955\n",
+ "current time = 0.111853, time step = 0.000955\n",
+ "current time = 0.112807, time step = 0.000955\n",
+ "current time = 0.113762, time step = 0.000955\n",
+ "current time = 0.114717, time step = 0.000955\n",
+ "current time = 0.115672, time step = 0.000955\n",
+ "current time = 0.116627, time step = 0.000955\n",
+ "current time = 0.117582, time step = 0.000955\n",
+ "current time = 0.118537, time step = 0.000955\n",
+ "current time = 0.119492, time step = 0.000955\n",
+ "current time = 0.120447, time step = 0.000955\n",
+ "current time = 0.121402, time step = 0.000955\n",
+ "current time = 0.122357, time step = 0.000955\n",
+ "current time = 0.123312, time step = 0.000955\n",
+ "current time = 0.124267, time step = 0.000955\n",
+ "current time = 0.125222, time step = 0.000955\n",
+ "current time = 0.126177, time step = 0.000955\n",
+ "current time = 0.127132, time step = 0.000955\n",
+ "current time = 0.128086, time step = 0.000955\n",
+ "current time = 0.129041, time step = 0.000955\n",
+ "current time = 0.129996, time step = 0.000955\n",
+ "current time = 0.130951, time step = 0.000955\n",
+ "current time = 0.131906, time step = 0.000955\n",
+ "current time = 0.132861, time step = 0.000955\n",
+ "current time = 0.133816, time step = 0.000955\n",
+ "current time = 0.134771, time step = 0.000955\n",
+ "current time = 0.135726, time step = 0.000955\n",
+ "current time = 0.136681, time step = 0.000955\n",
+ "current time = 0.137636, time step = 0.000955\n",
+ "current time = 0.138591, time step = 0.000955\n",
+ "current time = 0.139546, time step = 0.000955\n"
]
}
],
@@ -179,18 +336,26 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAABkEAAAJtCAYAAACBs9diAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAA9hAAAPYQGoP6dpAAC1uklEQVR4nOzdeXyV9Z33//d11iSQhS0LEBZFEEQWQWm0i7YoYsto5zetd22LQ6vz04H7YeWedkoXre1dmXtmtHZ+Y0sXHTp3x2oXq3Up1mKRseLCEgUVEFkSIAlhyZ6cnO33xznXlaQEJJBc13Wu6/V8PM4j5ORc5HNoPed7vp/v5/Mx0ul0WgAAAAAAAAAAAB4TcDoAAAAAAAAAAACAoUASBAAAAAAAAAAAeBJJEAAAAAAAAAAA4EkkQQAAAAAAAAAAgCeRBAEAAAAAAAAAAJ5EEgQAAAAAAAAAAHgSSRAAAAAAAAAAAOBJJEEAAAAAAAAAAIAnhZwO4EykUikdPnxYhYWFMgzD6XAAAHCFdDqt1tZWjR07VoEA5xqGEmsRAABOxlrEPqxFAAA42ZmuRXIiCXL48GFVVlY6HQYAAK5UW1ur8ePHOx2Gp7EWAQDg1FiLDD3WIgAAnNr7rUVyIglSWFgoKfNkioqKHI4GAAB3aGlpUWVlpfU+iaHDWgQAgJOxFrEPaxEAAE52pmuRnEiCmKWeRUVFvNkDAPAXaIkw9FiLAABwaqxFhh5rEQAATu391iI07QQAAAAAAAAAAJ5EEgQAAAAAAAAAAHgSSRAAAAAAAAAAAOBJJEEAAAAAAAAAAIAnkQQBAAAAAAAAAACeRBIEAAAAAAAAAAB4EkkQAACQMzZu3KglS5Zo7NixMgxDTzzxxPteE4vF9PWvf10TJ05UNBrVpEmT9PDDDw99sAAAAAAAwHEhpwMAAAA4U+3t7Zo9e7a+8IUv6K//+q/P6JpPf/rTamho0EMPPaQpU6aorq5OqVRqiCMFAAAAAABuQBIEAADkjMWLF2vx4sVn/Ph169bpxRdf1N69ezVy5EhJ0qRJk4YoOgAAAAAA4Da0wwIAAJ71u9/9TvPnz9c///M/a9y4cZo6dar+4R/+QZ2dnae9LhaLqaWlpc8NAAAAAADkHipBAACAZ+3du1cvvfSS8vLy9Nvf/lZHjx7V3//93+vYsWP6j//4j1Net3r1at1zzz02RgoAAAAAAIYClSAAAMCzUqmUDMPQf/3Xf+myyy7Tddddp/vvv18/+9nPTlsNsmrVKjU3N1u32tpaG6MGAAAAAACDhUoQAADgWRUVFRo3bpyKi4ut+6ZPn650Oq2DBw/qggsu6Pe6aDSqaDRqV5gAAAAAAGCIUAkCAAA864orrtDhw4fV1tZm3bd7924FAgGNHz/ewcgAAAAAAIAdSIIAAICc0dbWpurqalVXV0uS9u3bp+rqatXU1EjKtLFaunSp9fibbrpJo0aN0rJly/T2229r48aN+vKXv6wvfOELys/Pd+IpAAAAAAAAG5EEAQAAOWPz5s2aO3eu5s6dK0lauXKl5s6dq7vuukuSVFdXZyVEJGn48OF6/vnn1dTUpPnz5+uzn/2slixZon/7t39zJH4AAAAAAGAvZoIAAICcceWVVyqdTp/y52vXrj3pvgsvvFDPP//8EEYFAAAAAADcikoQAAAAAAAAAADgSSRBAAAAAAAAAACAJ5EEAQAAAAAAAAAAnkQSBAAAAAAAAAAAeBKD0QEAlnQ6LcMwnA4DAAAAwBBKpdJK93N/f58E+vt4wGcGAEAuIQkCAD6XSqX1ws4j+tmm/dp64IRu+8j5uv3K8xUKUiwIAOdiy4Hj+vcX9qi6tklXTBmtz1w2QZefP4qNIwCA436wYY/+9Q+7h/z39JtAOeVj+/4kFDA0LBrSiIKwzhszXNPLC3XD3HE6b8zwwQ8UAOBpJEEAwOe++eQO/derNdb39z2/Wxt2N+rBmy5ReXGeg5EBQO6699l39OONe63vn36zTk+/Waf/cWmlvvvJixUMkAgBAHhfup9yk/4qUPp7cDKVVizRrePt3XqvsV3Pv92gf3thj66YMkrfuX4myRAAwBkjCQIAPvb24RY98lomAXLrhyZrwqhh+uff79SWAyd0639u1q9uq1JeOOhwlACQW17ff9xKgHx6/nh9YtZY/eHtej3yao0efb1WHd1J3ffp2QpTcQcAOeeHP/yhfvjDH2r//v2SpIsuukh33XWXFi9e3O/j165dq2XLlvW5LxqNqqura6hDPa1bPnSePveBiX3uO9OERbqfB/b/uP7+vlOkQPq5uzuZUnssqcbWmN5rbNOLuxv1p11H9Oc9x/TXP3xZP/rcPC04b1T/fx8AAL2QBAEAH/uX53YqnZY+MatCX//4DEnSh6aM1id/8GdtP9Ssu598S//nb2Y5HCUA5I7uREpfe3y7JOnG+ZXWa+iHp45R1Xmjdcej2/S7Nw4rGDB0/6dn0xoLAHLM+PHj9U//9E+64IILlE6n9bOf/UzXX3+9tm3bposuuqjfa4qKirRr1y7reze89ueFgzlz2GlaeaE+eMFo3Xz5JNUe79CKX2zTG7VN+txDr+onS+frymmlTocIAHA5jp8BgE+9uveY/rSrUaGAof91zTTr/kmjh+n/+8wlChjSY5tr9djrNaf5WwAAvf3kv/fq3SNtGjUsolXXXdjnZx+fVaEffX6eQgFDv912SN/747sORQkAOFtLlizRddddpwsuuEBTp07Vd7/7XQ0fPlyvvPLKKa8xDEPl5eXWrayszMaIvaVyZIEevfUDuvaicsWTaf3Dr97Q0baY02EBAFyOJAgA+NT312c23268tFKTRw/r87MPXjDaSox863dva29j2xn/vc0dcf1443v69lNv66V3jyqRTA1e0ADgYl3xpH704nuSpK9/fLpKCiInPeZj08v03U/OlCT92/p3STQDQA5LJpN69NFH1d7erqqqqlM+rq2tTRMnTlRlZaWuv/56vfXWW+/7d8diMbW0tPS5ISM/EtQD/2OOppUV6mhbt1Y9vr3fFl0AAJhIggCAD9U3d2nT3mOSpNuvPL/fx9z+kfN1+fmj1BlP6s7HqhV/n2RGVzypf163U1X/tF73PrtTD/95nz730Kv60D//SfXNzvY8BgA7PLu9Ti1dCY0rydf1c8ad8nE3XjpBf5997f3q49v1q821doUIABgE27dv1/DhwxWNRnXbbbfpt7/9rWbMmNHvY6dNm6aHH35YTz75pH7+858rlUrp8ssv18GDB0/7O1avXq3i4mLrVllZORRPJWflhYP63o1zFAkG9PzbDfrN1kNOhwQAcDGSIADgQ89sr1M6Lc2fOELjRxT0+5hAwNB9n56toryQ3jjYrP/z+52n/Pt2HGrWX/37S/rBhvfU0Z3UheWF+tS88SqMhlTX3KU/7ToyVE8FAFzjF69lqjr+x6WVCgZO3+/9y4um6fMfmKh0WvrKb97UT/97r5IpTrECQC6YNm2aqqur9eqrr+r222/XzTffrLfffrvfx1ZVVWn
p0qWaM2eOPvKRj+jxxx/XmDFj9KMf/ei0v2PVqlVqbm62brW1JMz/0oyxRfrS1RdIku7/wy7FEkmHIwIAuBVJEADwoafeOCwpMxD9dCqK87X6rzNDfX/60j6tybZ5MSVTaT34pz365A/+rN0NbRo9PKI1n5un39/xIf3Lp2Zr6eUTJUnbak4MwbMAAPfYc6RVr+8/oWDA0Kfmv/9pXcMw9O3rL7ISIf/7mXd0/YMvad2OOjV3xG2IGABwtiKRiKZMmaJ58+Zp9erVmj17tr7//e+f0bXhcFhz587Vnj17Tvu4aDSqoqKiPjec7AtXTFZZUVSHm7v0y9dJFAEA+hdyOgAAgL1qj3eourZJAUO67n2SIFJmkG/tiQv1T7/fqX/6/U7VNXXq6hnl2nesXY+9XqMdhzL9iRddVKZ7P3mxRg2PWtfOqRwhSaqubRqS5wIAbvGL1zIbLx+9sFTlxXlndI2ZCJlaXqh/WbdTOw616Lafb5VhSGOL81VaFFVpYVSlhXmZr0VRVRTna9KoYRo3Iv99q028LplK63BTp/Yebdf+o+1q7owrkUxpeF5IE0cN0wWlwzV59DAZhr//nc5VOp1WfUuXKorznQ4FcK1UKqVY7MyGcyeTSW3fvl3XXXfdEEflD3nhoFZcNUXffPIt/fuf9uhT8yuVFw46HRYAwGVIggCAzzz9Zp0k6QPnjVJp4Zlt1N32kfPV1BHXmhff0882HdDPNh2wfjY8GtK3/uoi/T+XjDtpo2lOZYkk6d0jbWrpiqsoLzw4TwIAXCSRTOm32zK9yD9z2cB6thuGoc9/YKIWzyzXD/70nl7cfUTvNbbrUFOnDjV1nvK6cNBQ5cgCTRo1TJNGDVPlyHwNi4SUFwkqLxRQXjioge79R0NBjRwWVmlR3qC8XidTaQUMDSgJkUyldawtpoaWmOpbunS0LabO7qRiiZRiiaQ6u5OqOd6hvY3t2nesXd2J08+rGlEQ1twJIzRv4ghdPK5YY0vyNKIgIsMwlE6nlZaUSqeltCRDGjUsekbJpe5ESrvqW7XvWLvqmzvVFkuqIBJUcX5YlSMKNGFkgSpK8hQO2ld4n06nlUyllUhlvsaTKZ3oiOt4e0wBw1BBJKSxJXkqHMD/th3dCd3+8616cXejPnZhqb71VxepcmTfNppHWrv05LbDOnC8XUdbu5UfCaq8OE9TxgzX/EkjNGFkwfv+f+B4e7de3XtMuxpadfBEp4rzwyorimpaeZEuHleskcMi7xtrVzyp3Q2t2lnXqqbObpUURDR59DDNnziCRBgG1apVq7R48WJNmDBBra2teuSRR7RhwwY999xzkqSlS5dq3LhxWr16tSTp29/+tj7wgQ9oypQpampq0r/8y7/owIEDuuWWW5x8Gp7y6Usr9cMN7+lwc5d+8VqNll0x2emQAAAuQxIEAHzm6TfNVlhjB3TdP147TXMqS/SHt+v16t7jGj08oiWzx+qv5ow9ZTJlTGFU40fk6+CJTr1Z26wPXjD6nOMHALd5bf9xHW/v1oiCsD58wZiz+jtGD4/qriUzJM1QY2tMNcc71NjapSOtMR1pielI9s+Hmzq1/1iHuhMp7W1s197G9sF9MlkVxXmaWlaoaeWFmlZWqBljizSldPhJm/qpVFp1LV1653CL3q5r0VuHm7WzvlXH2rrVFksoYGSS5SOGRayqljGFURXmhRRPptUeS6ihpSt7i6mxLTag2SiRYECTRmeSQaOGRxQKBNTUGdf+o+3a3dCqEx1xvbDziF7YeWazqUIBQxUleaocUaDKEQUaPyLf2vQ/0dGtd4+0afvBZu2qb1V38vQJmGDAUFlhVEX5YeWFg5mESzZBkUqnNTwaUklBWONK8jV59DBNHjNc540epvLiTPIknU6rM57UoROd2nOkTe8eadOeI22qPdGhY23dVuWLmfRInOG/W+XIfF1YXqTp5YUqL86XYWQSCMfbu9WdTKkoL6zi/MztoZf2WdWc63ce0Ut7jurGSyt146WVqj3eoXU76vXM9jrFk6f+3aOHRzVvYonmTcwkoyaPHq6O7oRqj3fq9f3H9dK7R7X5wHGdLvzxI/I1a3yxLh5XolnjixUOBnS8Pab3Gtv1Tl2L3qlr0b6j7f3+HR+cMlp3L5mhC8oKz+jfB3g/R44c0dKlS1VXV6fi4mLNmjVLzz33nK6++mpJUk1NjQKBntfKEydO6NZbb1V9fb1GjBihefPm6eWXXz7lIHUMXDQU1O1XTdE3n9ihh/+8T0urJvm+WhIA0JeRTqddP4GxpaVFxcXFam5upg8mAJyD2uMd+tA//0kBQ9r8javP6GTlufqfv9imp944rH+4ZqpWfPSCIf99fsL7o334t8bp3P3kDv1s0wF9ev54/fPfzB7y32cmHvYfbde+o+06cKxdh5u61BlPqiuezH49/QZ9f7riSR1ri6mlK9Hvz0MBQ2VFeSotiiqdllWV0Rkf3EG0ASOTRC8vyiRM8iMhRUOB7C2osSV5Or90uM4fPfy0bcG6Eym9XdeirQdOaEvNCe2qb9WRlq5+n59hSAP9VFScH9a0skJVlOSpMC+kzu6UTnR0q/Z4h2qOdyj2PlUqp5MXDiiV1vtWuryfYZGgRg2PKq202roSOnEW82ZKCsL69vUz9ehrNXr5vWP9PuaSCSX64JTRGjU8qs54UoebOrXjULN2HGp532SR6cLyQs0cV6yJIwvUGkvoUFOn3j6cSW6cqREFYU2vKFJpYVQnOuLatPeYuhMpRYIBPfU/P6hp5SRCBhvvj/bh3/r0OruTWnDvH9XSldDDfztfH72wzOmQAAA2ONP3RypBAMBHnnurXpJ02eSRtiRApExLrKfeOKxtNU22/D4AsFMqldZzbzVIkq6dWW7L7wwEDI0ryde4knxdMWXwK+yaO+Pac6RVu+rbtKu+Re/Ut+qdwy3WxvRftukKBQxNKR2uGRVFmjG2SDMqilRRkq/CvJBSqbRauhI63t6txtZMRUtja0ztsYRCwYDyw0GVFUVVVpSn8uI8lRXlafTwM2tJ9X4ioYDmVJZoTmWJvqCe1ihmpclftupKptI60tql2uOdOniiQ7XHO1V7okMHT3QoYBgaURDR+JH5mjWuRBePK1blyPxTtllKpdJqbMtU7rTHkuroTihgGAoGDAUChgKG1B5L6Hh7XDXHO7TvaJv2NrbrwLEOdSdTfZJYw6MhnT9mmM4vHa4ppcM1edQwjSmMqqQgrHAwoGDAsL6GAkb2a0ChoHFS5c6J9m7trG/VzvpM9cSJjrjSaSkaCmjksIgioYBaOuNqzt6GR0P66uILdUFZoZbMqtCm947p4T/v0/qdRzR59DB9+IIx+uTccZqdbX/5l7riSe041KwtB05Yt2Pt3YqEAho1LKJLJo7Qgskj9dELSzV+REG/f0dzZ1xvHWrWm4eatf1gs9463CzDMDSiIKzKkQWaXlGkC8sLNaOiSGMKo33+N6k51qH/+YuteuNgs55+87CmlU87/f9pAOSs/EhQn5pfqYde2qf/3HSAJAgAoA+SIACQgzq7k3rktR
pFgobGjyzQh6aMVugM+o7/IbtRd80MezbqJGnuhBJJmeHo6XSavtwAPOWNg02qb+nS8GhIl5/vjZZ/xflhzZs4UvMmjrTuS6Uyw7HrmjvV2NqtYMBQNBRQ5chMy6jTzb4oddmB5VMlWIIBQxXF+aooztdlk0f2+5gzFchWzZQVndnsLVMylVZrV1wtnQkZhjRyWEQFkeCgvXeOGBZR1fmjVHX+qAFfaxiGLp8yWpdPGa1kKn1Giaq8cFDzJ43U/EmZf890OtO2ayCzUorzw9bvHagJowp004IJeuPgdr2yt/8qFgDe8bkPTNRDL+3Ti7sbdeBYuyaOGuZ0SAAAlyAJAgA56Cf/vVf3P7/b+v6jF5bqoZvnn3aT5GhbTJsPHJckXXORfSejLhpbpEgwoGPt3ao93qkJo/o/6QkAuWhdtsLuqgtLlRcOOhzN0AkEDI0tydfYknynQ/G0YMBQSUFEJQX2VGuerbOt1DEMQ+GgvYchPnBeJuFTXdukzu6k8iPe/e8U8LvJo4fpw1PHaOPuRv3XqzX62nXTnQ4JAOASZ34EBwDgGs9ur5Mkza4sUSQU0As7j+h3bxw+7TXr32lQKi3NHFd0ypYTQyEaCmp6RaYH9/ZDzbb9XgAYaul0Ws/tyCRBrr3Ivgo7AGduwsgCjS3OUzyZ1pYDJ5wOB8AQW/qBiZKk32w5qPgZziQCAHgfSRAAyDEHjrVrZ32rggFDP1t2qe74WGbY+LefeltNHd2nvM7sWb/IxlZYplHDo5Iy/c8BwCv2Hm3X/mMdigQDunLaGKfDAdAPwzCsahBaYgHed+W0MRo9PKpj7d16cVej0+EAAFyCJAgA5BhzuPkHzhupkoKIbv3QebqgdLiOtXfr/6zb1e81x9u79dK7RyVJi2wa3Nub2fqim9NYADxkQ3Zz5bLJIzUsSpdZwK3MJMgmkiCA54WCAd0wZ6wk6TdbDzocDQDALUiCAECOWZdtvbIo23olEgrou5+8WJL02Os12nOk7aRrfr2lVt3JlC4eV6ypZYX2BZtlDkClJB2Al2zYdUSSqAIBXM4cBP9GbZM6uqlKBbzu/5k3XpK0/p0jp62UBwD4B0kQAMghR1q6tLWmSZJ0Ta+2VpdNHqmF08uUSkv3P9+3GiSdTusXr9VKkm5aMMG2WHuLZJMg3QmSIAC8oaM7oVf3HpckXTmt1OFoAJzO+BH5GleSr0Qqrc37mQsCeN30iiLNqChSdzKlp95nbiIAwB9IggBADvnD25m5HnMnlKi8OK/Pz768aJoMQ3p2e722H+wZQL5p7zHtO9qu4dGQ/mr2WFvjNUVCVIIA8JZN7x1TdzKl8SPydf6YYU6HA+A0DMPQrPHFkqT3Gk+umAXgPWY1yG+2HnI4EgCAG5AEAYAcsqu+VZJ0ebatQ2/Tygt1w5xxkqR7nnpLXfGkJOmRV2skSdfPGetYz3qzHVZ3Mu3I7weAwfanXq2wDMNwOBoA76cgklkDxahKBXzhr2aPVcCQqmubVHu8w+lwAAAOIwkCADmksTUmSSoryuv353cunKqCSFCbD5zQ8v/aqv+zbqee2V4nyblWWBIzQQB4SzqdtoaiXzmVVlhALjCrUmnNCfjDmMKoPnBe5uDY02/WORwNAMBpJEEAIIccbcskQcYMj/b78wmjCvTTm+crGgpo/c4j+uGG95ROS1/84GRdNLbYzlD7CIcyp6TjbDwA8IC9R9t18ESnIsGALp9ycmUeAPeJZpMgsUTS4UgA2OUTszKtgJ9+k7kgAOB3JEEAIIc0ZpMgowv7T4JI0uXnj9ZPls5XJBRQYV5ID950ib75iRl2hdivqNUOiyQIgNz35z1HJUnzJo6wWuwAcLdomEoQwG+unVmuYMDQW4dbtO9ou9PhAAAcxKc2AMghZjusU1WCmD48dYxe+spVioaDKs4P2xHaadEOC4CX/Pe7mSTIBy8Y7XAkAM6UeSCDmSCAf4wcFtEVU0Zr4+5GPf3GYf3Pj13gdEgAAIdQCQIAOaI9llBHd6aFw5jTVIKYSovyXJEAkaSw1YebwegAclsimdIr7x2TJH2IJAiQM6LhoCQqQQC/+cSsCknMBQEAvyMJAgA5wpwHkh8Oalg0twr5qAQB4BVvHGxSayyhkoKwo7OWAAxMhEoQwJcWzShXOGhoV0Or3m1odTocAIBDSIIAQI6wWmGdQRWI20SC2cHoJEEA5DizFdYV549WMGA4HA2AM2XOBGEwOuAvxQVhffiCMZKkp6gGAQDfIgkCADnCrATJySRIiGGkALzhJTMJMoVWWEAuibIWAXzrE7PNlliHlU7TnhcA/IgkCADkCLMSZPTwiMORDJzZDqubShAAOawtltC22iZJzAMBco15IIN2WID/LJxepkgooL2N7XqnjpZYAOBHJEEAIEc0tnVLys1KEGaCAPCC1/YdUzKV1oSRBaocWeB0OAAGIBrKDEYnCQL4T2FeWFdNy7TEevrNww5HAwBwAkkQAMgR1kyQ4XkORzJwPUkQys8B5K5N7x2TJF1+/iiHIwEwUAxGB/ztE7PGSpKefrOOllgA4EMkQQAgR1jtsApzrx1WJMRgdAC5b9PeTBKkiiQIkHOswehxBqMDfvSx6aXKCwdUc7xDb9e1OB0OAMBmJEEAIEdYg9GH5147rEgw04KCYaQAclVzR1xvHc5smlSdRxIEyDUR5pMBvlYQCekjUzMtsZ7bUe9wNAAAu5EEAYAcYbXDysmZIJlKEDYeAOSqV/YdUzotnTdmmEqLcq8tIeB30XB2JkictQjgV9fOLJckrXuLJAgA+A1JEADIAel0Wo3ZSpDROVgJEg4xGB1AbjPngVAFAuSmaIhKEMDvPnphmUIBQ7sb2vReY5vT4QAAbEQSBAByQGssYbWSysVKELMFRTzBEEIAuemVveZQ9NEORwLgbERCzAQB/K44P6zLp2Tex5+jGgQAfIUkCADkALMVVmE0pLxsO4dcEuH0JYAcdqwtpp31rZKkD5w30uFoAJwNKkEASNK1F2VbYjEXBAB8hSQIAOSAXJ4HIklhqxKEjQcAuefVfcclSdPKCjUqB1sSAuhVCZJIKZ2mMhXwq2suKpNhSG8ebNahpk6nwwEA2IQkCADkgKPmPJCcTYIwGB1A7rLmgZzPPBAgV0VDmUradFqKJ0mCAH41enhUl07KVHU+RzUIAPgGSRAAyAFWJUiOnkC2ZoKQBAGQg15+76gk6QMMRQdyltkOS+JQBuB3Vkss5oIAgG+QBAGAHGBWguR6O6xUWkqmOH0JIHccaenSe43tMgzmgQC5zDyQITEcHfC7RTMzSZDX9x+3DpsBALyNJAgA5ICjrd2SpFHDIg5HcnYivU9fMhcEQA7ZtDfTCmt6eZFKCnLzNRiAFAgYViKEShDA38aV5GvW+GKl09Lzbzc4HQ4AwAYkQQAgB3RmTywOi4YcjuTshIO0oACQm17JJkEuZx4IkPOs4ehx1iKA3107k5ZYAOAnJEEAIAfEEpkkSDScmy/b5mB0ibkgAHILQ9EB7zDngnAgA4A5F+TlPUfV3Bl3OBoAwFDLz
d00APCZWLaFVDQUdDiSs2MYhpUIIQkCIFfUNXdq/7EOBQzp0snMAwFyHZUgAEznjRmuqWXDlUil9cJOWmIBgNeRBAGAHGB+WI+Gcvdl2+zDHU8wGB1AbjBbYV08rlhFeWGHowFwrsx1lFlhC8DfzGqQ32+nJRYAeF3u7qYBgI9Y7bByOAkStlpQsPEAIDe8tu+4JGnBebTCArzArKjtTlAJAkBalJ0L8uLuRnV0JxyOBgAwlHJ3Nw0AfMRqhxXOzXZYUs9w9G4qQQDkiFezSZDLJtEKC/ACqx0WSRAAkmZUFGnCyALFEim9uKvR6XAAAEOIJAgA5ICemSC5+7JttcNiJgjO0caNG7VkyRKNHTtWhmHoiSeeOONr//znPysUCmnOnDlDFh+8obE1pr2N7TIM6VKSIIAnREmCAOjFMAxdm60GWfcWLbEAwMtydzcNAHzEE+2wGIyOQdLe3q7Zs2frwQcfHNB1TU1NWrp0qT72sY8NUWTwktf3Z6pAppUVqriAeSCAF0SYCQLgLyzKzgV54Z0jvDYAgIeFnA4AAPD+egaj5247rIg1E4QkCM7N4sWLtXjx4gFfd9ttt+mmm25SMBgcUPUI/MmaBzKZKhDAK6gEAfCX5laWqLQwqiOtMb2855iuurDU6ZAAAEMgd48UA4CP9MwEyd2X7Z6ZIGw8wH7/8R//ob179+ruu+8+o8fHYjG1tLT0ucFfrHkgkxmKDniFdSCDtQiArEDAsKpB1u2gJRYAeFXu7qYBgI94ox2WOROEweiw17vvvquvfvWr+vnPf65Q6MyKYFevXq3i4mLrVllZOcRRwk2aO+PaWZ9JfF06eYTD0QAYLGZFLZUgAHpbnJ0L8vw7DUpQtQ4AnpS7u2kA4BPpdLrXYPQcbofFYHQ4IJlM6qabbtI999yjqVOnnvF1q1atUnNzs3Wrra0dwijhNlsOHFc6LU0ePUylhXlOhwNgkESpBAHQj8smj1RJQVjH27v1+v4TTocDABgCzAQBAJeLJ9NKZ4sncrodVojB6LBfa2urNm/erG3btmnFihWSpFQqpXQ6rVAopD/84Q/66Ec/etJ10WhU0WjU7nDhElYrrEnMAwG8hMHoAPoTCgZ09fQy/WrLQT33Vr2qzqcVJgB4Te7upgGAT/T+oJ7L7bAizASBA4qKirR9+3ZVV1dbt9tuu03Tpk1TdXW1FixY4HSIcKHXrHkgJEEALzEralmLAPhL187smQuSStG+FwC8ZsC7aRs3btSSJUs0duxYGYahJ5544rSPf/zxx3X11VdrzJgxKioqUlVVlZ577rmzjRcAfKd332ozkZCLrMHoVILgHLW1tVkJDUnat2+fqqurVVNTIynTymrp0qWSpEAgoJkzZ/a5lZaWKi8vTzNnztSwYcOcehpwqY7uhLYfbJZEEgTwmp5KENYiAPq6YspoDYsEVd/SpeqDTU6HAwAYZAPeTWtvb9fs2bP14IMPntHjN27cqKuvvlrPPvustmzZoquuukpLlizRtm3bBhwsAPhRzzyQgAzDcDiasxfObjzE2XjAOdq8ebPmzp2ruXPnSpJWrlypuXPn6q677pIk1dXVWQkRYKC21TQpkUprbHGexo/IdzocAIMoSjssAKeQFw7qo9PLJEnPvVXvcDQAgME24Jkgixcv1uLFi8/48Q888ECf7++99149+eSTeuqpp6zNCwDAqcXimQ/qudwKS+o9GJ3ycpybK6+8Uun0qf9/tHbt2tNe/61vfUvf+ta3BjcoeMarvVph5XLiGcDJIgxGB3Aa115UrqfeOKx1O+r11WsvZB0AAB5i+2D0VCql1tZWjRx56vYCsVhMsVjM+r6lpcWO0ADAlcxKkEi2j3WuitAOC0AOeG3fMUnSZZMZigp4TZR2WABO48ppYxQNBXTgWId21rdqekWR0yEBAAaJ7ceK//Vf/1VtbW369Kc/fcrHrF69WsXFxdatsrLSxggBwF16t8PKZeFQ5iQVpy8BuFUskdS2miZJzAMBvCgaZjA6gFMbFg3pw1PHSJJ+v4OWWADgJbbuqD3yyCO655579Mtf/lKlpaWnfNyqVavU3Nxs3Wpra22MEgDcxWqHFc7xJIjVDouNBwDutP1gs2KJlEYNi+j8McOcDgfAIIsGqQQBcHrXXlQuSXqOJAgAeIpt7bAeffRR3XLLLfrVr36lhQsXnvax0WhU0WjUpsgAwN16KkG80Q6LJAgAt3ptf2YeyKWTmAcCeJF5oIRKEACnsnB6mUIBQ7saWrW3sU3njRnudEgAgEFgy7HiX/ziF1q2bJl+8Ytf6OMf/7gdvxIAPMMz7bAYjA7A5TbvPyFJupRWWIAnRaxKkKTDkQBwq+KCsKrOz8wFW/cW1SAA4BUD3lFra2tTdXW1qqurJUn79u1TdXW1ampqJGVaWS1dutR6/COPPKKlS5fqvvvu04IFC1RfX6/6+no1NzcPzjMAAI8zP6jnehIkEmIwOgD3SqXS2pytBJk/cYTD0QAYCmYlCO2wAJzOtTNpiQUAXjPgHbXNmzdr7ty5mjt3riRp5cqVmjt3ru666y5JUl1dnZUQkaQf//jHSiQSWr58uSoqKqzbHXfcMUhPAQC8LRbPVoKEc7sdllkJQgsKAG60p7FNLV0J5YeDmjG2yOlwAAwBs7UoaxEAp3PNjHIZhvTGwWYdaup0OhwAwCAY8EyQK6+8Uun0qVuZrF27ts/3GzZsGOivAAD04p12WJn++swEAeBGZiusOZUlVtIWgLeYValUggA4nTGFUV06caRe239cz+2o1xc+ONnpkAAA54hPeADgct0ea4dFEgSAG222hqLTCgvwKnMtRSUIgPezKNsSi7kgAOANub2jBgA+0FMJktvtsCJWOywGowNwn80HMpUg8yYxFB3wqp5KEAajAzg9cy7I6/uPq7E15nA0AIBzRRIEAFzOSoKEc/sl22wvQyUIALc50tKlmuMdChjSJRNKnA4HwBAxD5SY89YA4FTGleRr1vhipdPS8283OB0OAOAc5faOGgD4QMwj7bDCtKAA4FJmFci08iIV5oUdjgbAULEqQTiQAeAMXEtLLADwjNzeUQMAHzBPK+Z+OywGowNwJ3MoOvNAAG/rPRMknaY9J4DTu/aiTBLk5T1H1dwRdzgaAMC5IAkCAC7XMxMkt1+yaYcFwK02H8gMRZ83kSQI4GW911LdrEcAvI/zxgzX1LLhSqTSWr+TllgAkMtye0cNAHzAaoeV4zNBzBYU3UlOXgJwj47uhN463CJJms9QdMDTIr2SIDHacwI4A9fOrJAk/X4HLbEAIJfl9o4aAPhATyVIbrfDohIEgBtV1zQpmUprbHGexpXkOx0OgCEUCfaqBCEJAuAMmC2xNu5uVHss4XA0AICzRRIEAFyuZyZIbr9km0kQNh0AuIk5FH0eVSCA5xmG0TMcnfUIgDMwvaJQE0cVKJZI6cXdjU6HAwA4S7m9owYAPmC1w8rxJEiEShAALmQmQRiKDviDuZ6KxZMORwIgFxiGYVWD0BILAHJXbu+oAYAPWO2wwjneDitkSCIJAsA9
kqm0tpqVIAxFB3whas0oYz0C4MwsmplJgrzwToO6SKACQE4iCQIALtczEyS3X7IjtMMC4DK76lvVFktoeDSkC8uLnA4HgA3MGWtmu1EAeD9zxpeovChP7d1JvfzeUafDAQCchdzeUQMAH/BKOyxrJggnLwG4xOYDxyVJcyeUKBgwHI4GgB2oBAEwUIGAoUUXlUmSfr+dllgAkItye0cNAHygZzB6brfDMgeRxpNphyMBgIzN+815IAxFB/zCGoxOJQiAAbh2ZoUk6fl3GpQgiQoAOYckCAC4XM9MkNx+yTYrQZKptJIpEiEAnLd5f6YSZD7zQADf6KkEoa8/gDN36aQRGjksoqaOuF7dd9zpcAAAA5TbO2oA4ANeaYcV6RU/w9EBOO1wU6cON3cpGDA0Z0KJ0+EAsAmVIADORigY0NXTMy2x1u2gJRYA5Jrc3lEDAB/oGYye2+2wwsGefvskQQA4bfOBTCusi8YWqSAScjgaAHaxBqMnWIsAGJhrLy6XJD33Vr1SVLYDQE4hCQIALtczEyS3X7LDgZ74u9l4AOAwsxXWPFphAb5itcNiLQJggC4/f5QKoyEdaY1pW+0Jp8MBAAxAbu+oAYDHpdPpnnZYOT4TJBAwFApkqkEYjg7AaQxFB/zJaoeVYCYIgIGJhoL62PRSSbTEAoBck9s7agDgcYlUWmalda63w5J6hqPTDguAk1q74tpZ3yKJoeiA30StJAhrEQADd+3MTEus3++oVzrNwS4AyBUkQQDAxXp/QM/1dlhSz+nLbpIgABy0raZJqbQ0YWSBSovynA4HgI0iJEFwjn74wx9q1qxZKioqUlFRkaqqqvT73//+tNf86le/0oUXXqi8vDxdfPHFevbZZ22KFoPtw1PHKC8c0METnXrrcIvT4QAAzlDu76gBgIfF4j2tGryQBKESBIAbmEPRqQIB/IckCM7V+PHj9U//9E/asmWLNm/erI9+9KO6/vrr9dZbb/X7+Jdfflmf+cxn9MUvflHbtm3TDTfcoBtuuEE7duywOXIMhoJISFdOzbTEeu4tWmIBQK7I/R01APAw8wN6JBSQYRgOR3PuIsHMc2AYKQAnbTmQHYo+iSQI4DfB7HqKNjY4W0uWLNF1112nCy64QFOnTtV3v/tdDR8+XK+88kq/j//+97+va6+9Vl/+8pc1ffp0fec739Ell1yif//3f7c5cgwWsyXWs9vreC0BgBxBEgQAXMxMgnihCkSSwiEqQQA4K5FMqbqmSZI0fyJD0QG/CQQySZAUG5cYBMlkUo8++qja29tVVVXV72M2bdqkhQsX9rlv0aJF2rRpkx0hYgh8dHqpIsGA3mts1+6GNqfDAQCcgZDTAQAATi2WyLTD8sJQdEmKZNthdSfYeADgjF0NrWrvTqowGtIFpcOdDgeAzQKGmQRxOBDktO3bt6uqqkpdXV0aPny4fvvb32rGjBn9Pra+vl5lZWV97isrK1N9/elbKcViMcViMev7lhbmT7hFUV5YH546Wn9854ie2V6naeWFTocEAHgf3jhaDAAeFYt7rBKEmSAAHLY1WwUyZ0KJdSIcgH+Y/9lTCYJzMW3aNFVXV+vVV1/V7bffrptvvllvv/32oP6O1atXq7i42LpVVlYO6t+Pc3PdxRWSMi2xAADu541dNQDwKKsdVtgbL9e0wwLgtK3ZoejzGIoO+JJVCUIpCM5BJBLRlClTNG/ePK1evVqzZ8/W97///X4fW15eroaGhj73NTQ0qLy8/LS/Y9WqVWpubrZutbW1gxY/zt3CGWWKBAPac6RNuxtanQ4HAPA+vLGrBgAe5b12WAxGB+CsLSRBAF8zaIeFIZBKpfq0ruqtqqpK69ev73Pf888/f8oZIqZoNKqioqI+N7hHUV5YH7pgtCTpmTepBgEAtyMJAgAu5tV2WN1UggBwQGNrTDXHO2QY0pzKEqfDAeCA7FKEdlg4a6tWrdLGjRu1f/9+bd++XatWrdKGDRv02c9+VpK0dOlSrVq1ynr8HXfcoXXr1um+++7Tzp079a1vfUubN2/WihUrnHoKGCS0xAKA3MFgdABwMasdlkeSIBGrHRYbDwDst7UmUwUyraxQhXlhh6MB4ASzHRY5EJytI0eOaOnSpaqrq1NxcbFmzZql5557TldffbUkqaamRoFAz9r98ssv1yOPPKJvfOMb+trXvqYLLrhATzzxhGbOnOnUU8AgWTijTOGgoXePtOndhlZdUMaAdABwK5IgAOBiVjussDfaYTEYHYCTzHkgcyfQCgvwq552WGRBcHYeeuih0/58w4YNJ933qU99Sp/61KeGKCI4pTg/rA9dMEYv7DyiZ7bX6UskQQDAtbxxtBgAPMpzlSAkQQA4yKwEYR4I4F+BTA6EJAiAQUFLLADIDd7YVQMAj4rFM5UgEY8kQcIMRgfgkO5ESm8cbJZEEgTwM7MdFucxAAyGq7MtsXY3tGnPkVanwwEAnII3dtUAwKO8VgkSCNCCAoAz3jrcrO5ESiOHRTRpVIHT4QBwSDBgzgRhLQLg3BXnh/XBKaMlSc+8We9wNACAU/HGrhoAeFRPEsQbM0ECVh9uhwMB4Dtba5okSZdMKLFmAgDwH4N2WAAGGS2xAMD9SIIAgItZg9G9UgmS3XhIkgUBYDNzKPoltMICfI0DGQAG2zUzyhUOGtrV0Ko9R9qcDgcA0A9v7KoBgEfF4tlKkLA3Xq5pQQHAKVvMJMgEkiCAnzEYHcBgKy4I64psSyyqQQDAnbyxqwYAHuW1dlgGw0gBOOBwU6fqW7oUDBiaPb7E6XAAOMiqBKEUBMAgoiUWALgbSRAAcDGvtsPi9CUAO5lVIDMqipQf8UZSGcDZoR0WgKFwzYwyhQKGdta36r1GWmIBgNt4Y1cNADyqpxLEGy/XQYN2WADsZyZB5jEPBPA9DmQAGAolBRGrJdbvqQYBANfxxq4aAHhUz0wQb5xcttphsfEAwEbbahiKDiAjYM0nczgQAJ7z8WxLrKffJAkCAG5DEgQAXMxqhxX0xsu1ORidFhQA7NLZndRbh1skSZdMKHE2GACOM6x2WCxGAAyuay7qaYm15wgtsQDATbyxqwYAHpXIZgvCIcPhSAYHLSgA2O3Ng01KpNIqK4pqXEm+0+EAcJi5FklyIgPAICspiOhDF2RaYj395mGHowEA9EYSBABcLJHMfEAPBrzxcm0NI2XjAYBNttY0ScrMAzFPgAPwryCD0QEMoSWzx0qSnnrjMHMQAcBFvLGrBgAelUhlZoKEA97YuAvQDguAzcyh6JdMYB4IgJ4DGWxOAhgKV88oUyQU0HuN7XqnrtXpcAAAWSRBAMDFzHZYIY/MBKEdFgA7pdNpbWUoOoBeDNYiAIZQYV5YH51WKkl6ipZYAOAa3thVAwCPMtthhbxSCUI7LAA2OnCsQ8fbuxUJBnTR2CKnwwHgAgHaYQEYYrTEAgD3IQkCAC4WT2baYYWCHkuC8FkAgA3MVlgXjy9WNBR0OBoAbmCOWaMSBMBQ+eiFpSqIBHXwRKeqa5ucDgcAIJIgAOBqyZQ5GN1bSZAkGw8
4Bxs3btSSJUs0duxYGYahJ5544rSPf/zxx3X11VdrzJgxKioqUlVVlZ577jl7goWjtmRbYc2jFRaArJ4DGaxFAAyN/EhQV88okyQ99Uadw9EAACSSIADgauZMkLDHZoJQFo5z0d7ertmzZ+vBBx88o8dv3LhRV199tZ599llt2bJFV111lZYsWaJt27YNcaRw2lZrKHqJs4EAcI2e1pwOBwLA05bMyrTEevrNw9bBNgCAc0JOBwAAODWrHZZXKkECbDzg3C1evFiLFy8+48c/8MADfb6/99579eSTT+qpp57S3LlzBzk6uEVrV1y7GlolSZdMoBIEQAaVIADs8KGpo1WUF9KR1phe339cHzhvlNMhAYCveeNoMQB4lHlqKBTwxss17bDgBqlUSq2trRo5cqTToWAIvVHbrHRaqhyZr9KiPKfDAeASPVWpzsYBwNuioaCunVkuKTMgHQDgLG/sqgGAR8WT2SSIRwajBxlGChf413/9V7W1tenTn/70KR8Ti8XU0tLS54bcssVqhUUVCIAeBgcyANhkyexMS6zf76i3KvwBAM4gCQIALpbM9o0KeyQJYlaCsO8ApzzyyCO655579Mtf/lKlpaWnfNzq1atVXFxs3SorK22MEoOBoegA+mNWgnAgA8BQqzpvlEYNi+h4e7defu+Y0+EAgK+RBAEAF0tkK0GCHmmHZZ2+ZDggHPDoo4/qlltu0S9/+UstXLjwtI9dtWqVmpubrVttba1NUWIwpFJpbauhEgTAyYLmfDKWIgCGWCgY0HUXV0iiJRYAOM0bu2oA4FHxlLcGowc5fQmH/OIXv9CyZcv0i1/8Qh//+Mff9/HRaFRFRUV9bsgdexrb1NqVUH44qAvLC50OB4CL9FSlshYBMPTMlljP7ahXLJF0OBoA8C+SIADgYtZgdK+0wwrQDgvnrq2tTdXV1aqurpYk7du3T9XV1aqpqZGUqeJYunSp9fhHHnlES5cu1X333acFCxaovr5e9fX1am5udiJ82MCcBzKnskShIMtdAD0MDmQAsNH8iSNUXpSn1lhCL+5qdDocAPAtPhUCgEul0+mewei0wwIsmzdv1ty5czV37lxJ0sqVKzV37lzdddddkqS6ujorISJJP/7xj5VIJLR8+XJVVFRYtzvuuMOR+DH0tppD0SeWOBsIANcJWGsRhwMB4AuBgKFPzMq2xHqzzuFoAMC/Qk4HAADoX+88gVcGowcNsw83SRCcvSuvvPK0bUzWrl3b5/sNGzYMbUBwHYaiAzgV2mEBsNuS2WP105f26Y9vN6ijO6GCCFtxAGA3bxwtBgAPivc6ohj0yEyQAC0oAAyxE+3d2tvYLkmaW0kSBEBfZnEtaxEAdpk1vlgTRhaoM57U8283OB0OAPgSSRAAcKlEr1KQsEd62pszQeiGBWCobKvNVIGcN2aYRgyLOBwNALcJGKxFANjLMAzdMCczIP2JbYccjgYA/Mkbu2oA4EHJZM+nc+9UgtAOC8DQMoeiz5tAFQiAk7EWAeCE6+eOkyRtfPeojrbFHI4GAPyHJAgAuFQ81dMOK+SZJEjmK4PRAQyVLdZQdJIgAE5mrkXIgQCw0/ljhmv2+GIlU2k9/cZhp8MBAN8hCQIALpXIVoKEAoYMwxtJELOihY0HAEMhkUzpjdpmSQxFB9A/c03FgQwAdrshWw3y22qSIABgN5IgAOBSiWwliFdaYUk9Gw+0oAAwFHbWt6oznlRhXkhTxgx3OhwALhQMsBYB4IxPzBqrYMDQG7VN2tvY5nQ4AOArJEEAwKXMShCvDEWXaIcFYGiZrbDmThihgIcSyAAGD+2wADhlTGFUH7pgtCTpCapBAMBW3tlZAwCPSWQTBaGgdzbyggbtsAAMna01DEUHcHoMRgfgpE9mW2I9se2Q0rwOAYBtSIIAgEuZ7bC8MhRd6tWHmwU/gCFgVoIwDwTAqZhj1kiCAHDC1TPKVBAJquZ4h7bWNDkdDgD4BkkQAHCpnsHo3nmpDrDxAGCIHGnp0sETnTIMaXZlsdPhAHCpgDUY3eFAAPhSQSSkay8ql5SpBgEA2MM7O2sA4DFmOywvDUbvGUbqcCAAPMdshTWtrFCFeWGHowHgVuZahDY0AJxyQ7Yl1tNvHlZ3gowsANiBJAgAuFQie0Qx7KGZIFYfbrIgAAaZ2QrrElphATgNqlIBOO3y80dpTGFUJzri2ri70elwAMAXSIIAgEvFzXZYQe+8VAcCDCMFMDSseSAMRQdwGoZBVSoAZ4WCAf3V7LGSpN9W0xILAOzgnZ01APCYZMqcCeKlSpDMVzYeAAymWCKpHYdaJDEUHcDpWVWpHMgA4KBPZlti/fHtBrV0xR2OBgC8jyQIALhUPJVphxWiHRYAnNaOQy3qTqY0clhEE0cVOB0OABezDmSwFgHgoIvGFmlK6XDFEimt21HvdDgA4HkkQQDApZJmO6yAd16qOX0JYChsNeeBTBhhtboBgP4EaIcFwAUMw7CqQZ7YRkssABhq3tlZAwCPSZiVIJ5sh8XOA4DBs7UmOw+EVlgA3gfzyQC4hTkXZNPeY6pr7nQ4GgDwNpIgAOBSPYPRPZQECXD6EsDgSqfT1lD0SyaUOBsMANczD2SQAwHgtMqRBbps0kil09IT2w47HQ4AeBpJEABwqZ7B6N55qaYdFoDBdqipU0daYwoFDM0aX+J0OABcjrUIADf560syLbF+vaVWaV6XAGDIeGdnDQA8Jp704mD0zNckpSAABolZBTJjbJHyI0GHowHgdubYoCSbjQBc4OOzKpQXDui9xnZV1zY5HQ4AeBZJEABwqYSHK0HYdwAwWHoPRQeA99N7LcKpawBOK8wLa/HMCknSr7YcdDgaAPAu7+ysAYDH9CRBvFMJEmQYKYBBtrWmSZJ0CUPRAZyBoNGzrmI5AsANPjVvvCTpqTcOqyuedDgaAPAmkiAA4FIJD7bDMmiHBWAQdXQn9HZdiyRpHkkQAGcg0CsJwqEMAG7wgfNGaVxJvlq7EnrurXqnwwEATyIJAgAuZSYKwkHvvFT3VII4HAgAT3jzYLOSqbTKiqIaW5zndDgAcoDRa1nFegSAGwQChv6fbDXIr2mJBQBDwjs7awDgMfFk5pN50EPtsHr6cLPrAODcmUPR500cIcPwzmslgKFDJQgANzJbYr2056gONXU6HA0AeA9JEABwKbMdVthD7bDMfE6STQcAg2BbDUPRAQxM77MlJEEAuEXlyAJ94LyRSqelx6kGAYBBRxIEAFzKHIzuxUqQFP0nAJyjdDrNUHQAA9a3EsTBQADgL/zNvEpJ0q+3HqRyHgAGGUkQAHCpRCo7GD3gnZfqnnZYDgcCIOftP9ah4+3dioQCumhskdPhAMgRtMMC4FbXXVyuYZGgDhzr0Gv7jjsdDgB4ind21gDAYxLWYHTvVYLQDgvAuTLngVw8rljRUNDhaADkit4FtumUc3EAwF8qiIT08VkVkqRfbqYlFgAMJpIgAOBSCWswundeqs2nwslLAOdqa03PUHQAOFO9K0E4lAHAbW68NNMS65nth9XcGXc4GgDwDu
/srAGAx3hzMLo5E8ThQADkvK0HzKHoJc4GAiCnGAxGB+Bil0wYoallw9UVT+nJ6kNOhwMAnkESBABcytOD0dl0AHAOWrvi2tXQKimzWQAAZ8owDKslFusRAG5jGIY+c9kESdIjr9YwIB0ABglJEABwKbMdVjjonZdq2mEBGAzVtU1Kp6XxI/JVWpTndDgAcox5KIPlCAA3+uTccYqGAtpZ36rq2ianwwEAT/DOzhoAeEw82zMq5MlKEHGqCcBZ23qgSRLzQACcHSpTAbhZSUFEH784MyD9F6/VOBwNAHgDSRAAcKmkB9thBXs14mbfAcDZ2lJjzgMhCQJg4AyrHZazcQDAqXxmQaYl1lNv1KmliwHpAHCuSIIAgEt5sh1WryQIpy8BnI1UKq1t2SQIlSAAzoZVCUIWBIBLzZ84QheUDldnPKknqw87HQ4A5Dzv7KwBgMckzHZYQe9Ughi93nWSJEEAnIU9jW1q7UooPxzUheWFTocDIAeZVbYcyADgVgxIB4DBRRIEAFzKrATx0kwQ2mEBOFdbD2SqQGZXFivkoUo5APahHRaAXPDXl4xTJBTQO3UteuNgs9PhAEBOG/Anx40bN2rJkiUaO3asDMPQE0888b7XbNiwQZdccomi0aimTJmitWvXnkWoAOAv8ZSZBPHOJh/tsACcqy0HmAcC4NwwGB1ALug9IP2RVw84HA0A5LYB76y1t7dr9uzZevDBB8/o8fv27dPHP/5xXXXVVaqurtaXvvQl3XLLLXruuecGHCwA+EnSi+2wej2VJMcvAZyFLcwDAXCOzCJb2ssAcLubsgPSf/fGYTV1dDscDQDkrtBAL1i8eLEWL158xo9fs2aNJk+erPvuu0+SNH36dL300kv63ve+p0WLFg301wOAb8ST3qsECQZ6V4I4GAiAnHSivVt7G9slSXOpBAFwlsxKkGTK4UAA4H3MnzhC0yuK9E5dix57vVb/70fOdzokAMhJQ76ztmnTJi1cuLDPfYsWLdKmTZtOeU0sFlNLS0ufGwD4TSLpvUqQPu2wyIIAGKBttZkqkPNGD9PIYRGHowGQqwIMRgeQIwzD0N9ePlGS9H9fOUA1PQCcpSFPgtTX16usrKzPfWVlZWppaVFnZ2e/16xevVrFxcXWrbKycqjDBADXSaa8Nxi991Nh4wHAQG090CRJuoRWWADOQcAajM5aBID7XT9nnEoKwjp4olPr32lwOhwAyEmu7LGyatUqNTc3W7fa2lqnQwIA21ntsIKufKk+K4ZhWHNBOMQEYKAYig5gMJiVqeRAAOSCvHBQ/+PSzGyQtS/vdzYYAMhRQ76zVl5eroaGvpnqhoYGFRUVKT8/v99rotGoioqK+twAwG/MSpCwhypBpJ6NB05fAhiIRDKlNw42SWIoOoBzw1oEQK75fNVEBQzp5feOaXdDq9PhAEDOGfIkSFVVldavX9/nvueff15VVVVD/asBIKfFU5mZIEGPJUGCbDwAOAs761vV0Z1UYTSkC0qHOx0OgBxmVqXSWx9ArhhXkq9rZpRLohoEAM7GgJMgbW1tqq6uVnV1tSRp3759qq6uVk1NjaRMK6ulS5daj7/tttu0d+9efeUrX9HOnTv1gx/8QL/85S915513Ds4zAACPSniwHZYk2mEBOCvbajKtsOZMKLGGGgPA2Qhag9EdDgQABuBvr5gkSfrt1kNq7og7GwwA5JgB76xt3rxZc+fO1dy5cyVJK1eu1Ny5c3XXXXdJkurq6qyEiCRNnjxZzzzzjJ5//nnNnj1b9913n376059q0aJFg/QUAMCbvDgYXerVgoKdBwADwDwQAIOlZyYIaxEAuWPB5JG6sLxQnfGkfrmZ2bkAMBChgV5w5ZVXnnaxuHbt2n6v2bZt20B/FQD4WjyZaYcVCnorCdJz+pKNBwBnbmtNkyTmgQA4d1SlAshFhmHoby+fpK8+vl3/+cp+feGDkz3XOhkAhoq3eqwAgIdYg9E92g6LPtwAzlRja0w1xztkGJl2WABwLhiMDiBXXT9nnEYUhFV7vFPrdtQ7HQ4A5Axv7awBgIeYlSBeO93Ts/HgcCAAcsbW7DyQqaWFKsoLOxwNgFxnLq1ozYmzsXr1al166aUqLCxUaWmpbrjhBu3ateu016xdu1aGYfS55eXl2RQxvCQ/EtTSqkmSpB9tfI+2fgBwhkiCAIBLJcxKkIC3XqrNpA4LdgBnaqs5D2RiibOBAPAEDmTgXLz44otavny5XnnlFT3//POKx+O65ppr1N7eftrrioqKVFdXZ90OHDhgU8TwmqVVExUNBfTmwWa9sve40+EAQE4Y8EwQAIA9zCSI12aCmKcvkyRBAJwhsxKEoegABgPtsHAu1q1b1+f7tWvXqrS0VFu2bNGHP/zhU15nGIbKy8uHOjz4wKjhUX16fqX+7ysH9KON76nq/FFOhwQAruet48UA4CEJczC6V9thpRwOBEBO6E6k9MbBZknSJQxFBzAIzCJbkiAYDM3NmfeokSNHnvZxbW1tmjhxoiorK3X99dfrrbfesiM8eNQtH5qsgCFt2NWoHYeanQ4HAFyPJAgAuFAqlbZaNIQ8Nhid05cABuLtuhZ1J1IqKQjrvNHDnA4HgAeYaxGWIjhXqVRKX/rSl3TFFVdo5syZp3zctGnT9PDDD+vJJ5/Uz3/+c6VSKV1++eU6ePDgKa+JxWJqaWnpcwNME0cN0ydmjZUk/fsLexyOBgDcz1s7awDgEYleTaq9Nxg985UkCIAzseVATyssw/DW6yEAZ5ivJUmGguAcLV++XDt27NCjjz562sdVVVVp6dKlmjNnjj7ykY/o8ccf15gxY/SjH/3olNesXr1axcXF1q2ysnKww0eOW/HRKTIMad1b9dpZT5IMAE6HJAgAuFCiV6+osNdmggQYRgrgzJnzQObRCgvAIOFABgbDihUr9PTTT+tPf/qTxo8fP6Brw+Gw5s6dqz17Tn2Cf9WqVWpubrZutbW15xoyPGZqWaGum1khSfr/qAYBgNMiCQIALtS7EiQU8NZLNe2wcK42btyoJUuWaOzYsTIMQ0888cT7XrNhwwZdcsklikajmjJlitauXTvkcWJwbD3AUHQAgytocCADZy+dTmvFihX67W9/qxdeeEGTJ08e8N+RTCa1fft2VVRUnPIx0WhURUVFfW7AX1rx0SmSpGe31+mdOqpBAOBUvLWzBgAekUj2ToJ4rBLEPH3JzgPOUnt7u2bPnq0HH3zwjB6/b98+ffzjH9dVV12l6upqfelLX9Itt9yi5557bogjxbk63NSpuuYuBQOGZlcWOx0OAI/omQnCWgQDt3z5cv385z/XI488osLCQtXX16u+vl6dnZ3WY5YuXapVq1ZZ33/729/WH/7wB+3du1dbt27V5z73OR04cEC33HKLE08BHjK9okgfn1WhdFq699l3nA7HFvFkSkdaunSkpUvH27uVSKbe/yIMWDqdds375NG2mJ7dXqf2WOJ9H9vcGVdzZ/ysY+9OpLTvaLvqmjvV2Z0c8PXxZEpH22I60tLFnofLhJwOAABwMnMhFzB62
kd5Be2wcK4WL16sxYsXn/Hj16xZo8mTJ+u+++6TJE2fPl0vvfSSvve972nRokVDFSYGgdkKa3pFoQoiLFsBDA7DaoflbBzITT/84Q8lSVdeeWWf+//jP/5Df/u3fytJqqmpUaBXNfeJEyd06623qr6+XiNGjNC8efP08ssva8aMGXaFDQ/7x0UX6g9v1eu/3z2qF3c36iNTx9j6+5OptLXx3J1IKZ7M3BJn8SLbnUipuTOuluzfd6IjriOtXWpsjamxNaYjrTEdb+/uc41hSCX5YY0cFtGo4VGNHh7J/HlY5s/BQECxRFJd8ZT1tSueVCyRUiyeVCqd1vC8kIrywirMC6swL6Si/OzXvJAK88IKBwMKBQyFgoZCgYAK80LKCwfP+HnFkynVNXWpvqVLLZ1xtXcnsrFn3pB6f+Jv7UroREe3mjq61dSR+Tdo7uxWOi3lR4IqygurtCiq0sI8lRZGVVaUp9KiqIrzw+pOZJ7biY64jrdn/o7mzrgSqbRSqbRSaSmZTiuZyvzvk0imlUim1BpLqKUzrqaOuJqy//bNHXGl0mkV54dVXBBWcX5YJflhlRREVFIQVkl+RIV5IYWChgzDUNAwFAzI+nMgkDl0EA0FlRcOKJ2WYomkjrV3q6E5829R3xJTZ3ci8+8bDCgcMLJ/NpQfDqq8OE/H27v16y0HFUukVF6Up29+Yoauu7jc+rdLp9Pa+O5Rrdnwnt6ua1FzZ1xS5jBpeXGezh8zXBeWF+ri8cWaPb5E40fk95kz2NTRra01J7Rx91G9sveY9hxp6/P/3dHDIzpv9HBNHj1M540ZpvPGZP4sSUdau7TvaLvebWjTu0datbuhTY2tMevaUMDQ+WOG6yvXTtPHpped8f9fMDT4NAkALmS+6XqtFZbUc/qSYaSwy6ZNm7Rw4cI+9y1atEhf+tKXTnlNLBZTLNazgG1pob2AE7YeaJJEKywAg8tai7jkhCtyy5mcLt6wYUOf77/3ve/pe9/73hBFBL+bMKpAN1dN0k9f2qd7n3lHH5wyWsGzPEiXTqd1tK1bB461q7E1ppaueDYpkej157hauhJq6ujWiY64mjq6bU8qm3vY6XTmdiKbLHivsd22GCKhgJUYKM4Pq6QgrKLsnw0ZOt4eU31Ll2qPd6quuTNnE+/H2rt17C8ST04oiARV39Kl5Y9s1aWTRuiOj01VXXOnfrXloF7bd/ykxydSaR080amDJzr14u5G6/4RBWFNGj1MiWRax9u7daip86Rr88IBxZNpJVOZ/x6Oth3Xa/tP/h2nEzAyMexqaNUXf7ZZ188Zq3/+m1mKhs48eYbBRRIEAFzIbIcV8thQdKmnHZZbSmvhffX19Sor63vypqysTC0tLers7FR+fv5J16xevVr33HOPXSHiFLYwFB3AEDDPmLAWAeAVKz46Rb/aclC7Glr14417dfuV5w/o+jdqm/TbbYf09Jt1OtoWe/8L+jE8GlI0FLBO8oeDAQ3002woaGQqD/LDKsrLVCCYFQ9jCqMqLYpqzPCoRhREFAgYSqbSOtHRrePt3TraFtOxtsyfj7XFdDT7NZnKbGqbFQl54aCioczXvHBAhgyrEqK1K6HWrszXlq6e7xPJtOKplJKptOLZz+rdiZRVoXImoqGAKorzVJwfVkEkJMPIJHAkKa209efCvJBKCiIaURDOfs1UXgQMQ53xhE60x3WkNaYjrV060pL92hpTa1dC4aChvHBQI7LXjyiIqDg/U8liVmYEDEPBgKFw0FAwkKlwGZ4X6pPQKc7+bkM97aWashUpzb0qRlo6M9UiqVTmYEE6nUkcpNKZGaDJVFrdiZQ640kFDEORUEAl+WGVFeepvChzG54XylQOJdOZKqJUpjqlPZZQXXOXYomUPjl3nOZNHKEfbnhPa158T6/vP6HPPfSq9W8bCQb0+aqJ+pt54zVxVIEChqETHd2qPd6pd4+06u3DLdp+qFnv1LVkEmY1TX3+t5k4qkBXTBmtD18wWhePL9HY4jxJUktXQjXHOrT3aJv2NrZr79F27Tvapn2N7QoYhsYURVU5okBTy4brgrJCTS0r1MSRBSrKDyudTquhNab/fHm/fvLfe/Vk9WHNnzhCn6+aNMD/KjBYSIIAgAvFU5l2WF6bByL1HozucCDAaaxatUorV660vm9paVFlZaWDEflPVzyptw83S6ISBMDg6lmLsBgB4A0lBRF9/ePT9ZVfv6n7n9+lD08drYvGvv88tQPH2vXdZ97RH95usO4zDGlscb7Ksxv2mYREyKpyKMrLVDwU5Yc0alhUI4ZlNtvDQfu7GAQDhkYPj2r08KimlhXa8jvT6bTaYgkrOdDcEe9JFGS/ptJpjRoW0ZjCzCb5hJEFGj08mpOtrseWnHxgzCl3Xj1V/+OySv3b+nf1+NZDmjx6mBbPrNDfzB+vcX8RZ0VxviqK83XZ5JHWfbFEUjvrWlXX3KVoKKDheSFNLStUcX64399XnB/WxeOLdfH4s5lNaGhcSb5WXTddBZGQvvfH3dq09xhJEAeRBAEAFzJbRYUcWEgONVpQwG7l5eVqaGjoc19DQ4OKior6rQKRpGg0qmg0akd4OIXth5oVT6Y1pjCq8SPc8+ELQO6zkiDM0gXgIZ+aN15/fLtBf3i7QXc+Vq0nl39Q+ZH+W+/Ekyn9eONeff+P76o7mVIwYOgTsyp0w9xxqjpv1IDmXfiNYRjZ2SFhjeecju0qivO1+q9n6d5PXtxntseZiIaCml1Zotk2n22rOn+UvvdH6bV9J5ROpwccNwYHSRAAcKF40ruVIMEApy9hr6qqKj377LN97nv++edVVVXlUEQ4E1sOZFphXTKhhA8KAAaVubxiLQLASwzD0Oq/vlhba05od0ObPvfQq3r45ktVXND3lPvm/cd19+/e0luHMzPvPnTBaN31iRm6wKZKCmAw5NLng1njixUJBnS0Lab9xzqsweqwl/eOGAOAB5iVIE6UFA81ZoLgXLW1tam6ulrV1dWSpH379qm6ulo1NTWSMq2sli5daj3+tttu0969e/WVr3xFO3fu1A9+8AP98pe/1J133ulE+DhDWw8wDwTA0DArQViKAPCaUcOj+tHn56soL6QtB07ob9a8rF9trtW7Da363RuHdcvPNutv1mzSW4dbVJwf1vdunK3//MJlJECAIZQXDmp2Zaal1uv9DHGHPagEAQAXMoetBT1YCWKe2EjSggJnafPmzbrqqqus783ZHTfffLPWrl2ruro6KyEiSZMnT9YzzzyjO++8U9///vc1fvx4/fSnP9WiRYtsjx1nJp1Oa2uNWQlCEgTA4DJozQnAw+ZNHKFf3Xa5lj78qt490qYv//rNPj8PGNKNl1bqzqunqrQwz6EoAX+5dNJIvb7/hF7bf1yfvpRZk04gCQIALpQw22EFvZcEoR0WztWVV1552kqitWvX9nvNtm3bhjAqDKba45062tatcNDQzHFnM4gQAE7NLLRlLQLAq6aVF+p3Kz6o/3rlgDbsbtTOulZNKy/U/Ekj9NkFEzSllMoPwE6XTh4pbXhPr++n
EsQpJEEAwIWswegerAShHRaA97OlJvPh4KKxxQzmBDDorMHoLEUAeFhZUZ5WXjNNK6+Z5nQogO/NmzhChiEdONahIy1dKi2iCstu3ms2DwAeELeSIN57maYdFoD3s/VAkyTmgQAYGj0zQciCAACAoVeUF9b08iJJ0mtUgzjCe7trAOABZjussBfbYRm0wwJwelsOMA8EwNDJLkWUohQEAADY5OJsm9+9je0OR+JPJEEAwIUSKe8ORg/QhxvAabTHEtpZ3yJJumRiibPBAPCkgDUY3eFAAACAb+RHMm1+uxO0xXACSRAAcKFE9lN5KOi9l+kAlSAATuON2ial0tLY4jxVFOc7HQ4ADzIPmdAOCwAA2CUayuzvxBJJhyPxJ+/trgGAByRS3m2HZSVBOPwAoB9ba7KtsJgHAmCIWO2wSIIAAACbRLJJECpBnEESBABcyKwECXpwMLrZ4SvJxgOAfjAPBMBQ66lKdTgQAADgGz2VICRBnOC93TUA8ACrEsSDM0FoQQHgVFKptJUEmT+JJAiAoRGgEgQAANiMShBnkQQBABfy8mB0g9OXAE5hT2ObWroSyg8HNb2iyOlwAHhUT2tOFiMAAMAekezM11iSJIgTSIIAgAuZ7bDCnhyMnvmaZOMBwF/YvD9TBTKnssSTr38A3CEQ4EAGAACwVzQclCTF4iRBnMCnSwBwIbMSJOTBwei0wwJwKpsPHJdEKywAQ4t2WAAAwG5mJUg3lSCOIAkCAC6UyL4p0g4LgJ+Y80DmTSQJAmDoMBgdAADYLRo2Z4IkHY7En0iCAIALmZUg4YD3XqbNjQfaYQHorbE1pgPHOmQY0iUkQQAMIXMtQlUqAACwizUThMHojvDe7hoAeIA5E8ST7bBoQQGgH1uyrbCmlRWqKC/scDQAvMxgPhkAALBZJGRWgpAEcQJJEABwoUQq86YY8mA7rJ4WFGw8AOhhDkWnFRaAoUY7LAAAYLdoKDsYnSSII0iCAIALxa1KEO+9TDMTBEB/NmfngTAUHcBQM2eu0Q4LAADYhUoQZ3lvdw0APCDp4UoQM69DJQgAU1c8qbcON0uS5k8c6XA0ALzOoDUnAACwWZQkiKNIggCAC8U9PBPEakFBKQiArDdqmxRPplVaGNX4EflOhwPA42iHBQAA7GYmQWKJpMOR+BNJEABwIXNQZyjgvZfpQICNBwB99W6FZbbMA4ChEmAwOgAAsBntsJzlvd01APAAbw9Gz3ylBQUA05YD5lB0WmEBGHpmJQgzQQAAgF3MwejdSZIgTiAJAgAu5OXB6LTDAtBbKpW2kiDzJzIUHcDQox0WAACwm1kJEk+m2Q9xgPd21wDAA3raYXmxEoSNBwA93mtsU3NnXPnhoGaMLXI6HAA+0LMWYTECAADsYSZBJKpBnEASBABcKJ59Q/TyYPQkGw8A1DMPZHZlscIerH4D4D49rTmdjQMAAPhHtFcSJBYnCWI3PmkCgAslPN0OK/OV05cAJGnzfrMVFvNAANgjEGAmCAAAsFcoYCh7JlSxZNLZYHzIe7trAOABCQ+3wwpaGw8OBwLAFbYcOC5JmjeJeSAA7GFuQCQpBQEAADYxDEOR7EHX7gSVIHYjCQIALpRIZdtheTAJYpjtsNh4AHyvsTWm/cc6ZBjSJRNIggCwR5D5ZAAAwAFmS6wYSRDbkQQBABcyEwRe7I9vPiXaYQHYkp0HMrW0UMX5YYejAeAX5nwy2mEBAAA7RUJBSVSCOMF7u2sA4AHmYPSgBytBejYeHA4EgONohQXACQbzyQAAgAPMShCSIPYjCQIALmQORg8HvZcEoR0WANPmA+ZQdJIgAOwToB0WAABwAO2wnEMSBABcyByMHgx472W6pw83Ow+An3XFk9pxqFmSNH/iSIejAeAnZqFtkrUIAACwUYRKEMd4b3cNADzAGozuwUqQgNWCwtk4ADjrzYPNiifTGlMYVeXIfKfDAeAjZrtRZoIAAAA79VSCJB2OxH9IggCAC1ntsDxYCRLIbjykyIIAvrY5Ow9k/sQRVps8ALCD+ZqT4hAmAACwEZUgzvHe7hoAeEBPOyzvbQwGaIcFQNKW/Zl5IPOYBwLAZqxFAACAE6wkSJIkiN1IggCACyWyb4heHIxOH24AqVRaW2qyQ9EnMQ8EgL1ozQkAAJwQDQUlSbE4SRC7kQQBABcyK0FCQe+9TJunL8mBAP61p7FNTR1x5YeDumhskdPhAPAZKkEAAIATItk9nhiVILbz3u4aAHiAORMk5MV2WAE2HgC/e21fZh7I3AklCnsw2QvA3ViLAAAAJ0TDzARxCp86AcCFEtlJnSEvt8OiBwXgW2YS5LLJtMICYD/aYQEAACdYlSCJpMOR+A9JEABwIasdlgcrQcxh7xy+BPwpnU7r9f3ZJAjzQAA4oKc1J4sRAABgH2swOpUgtiMJAgAu1NMOy3sv0wZ9uAFfO3iiU3XNXQoFDM2dMMLpcAD4kGFVgrAWAQAA9rEGo5MEsZ33dtcAwANohwXAq8xWWBePL1Z+JOhwNAD8yKwEYS0CAADsRCWIc0iCAIALebkSJGhVgjgcCABH0AoLgNOCAdYiAADAfiRBnOO93TUAyHHpdLpnJognK0Howw342WvZJMilJEEAOMSsSmUtAgAA7BQNMRjdKSRBAMBlerdmCHuwEsTsw51k4wHwncbWmPY2tkuS5k9iHggAZxhUpQIAAAdEqQRxjPd21wAgxyV6fSIPerAShBYUgH9tzlaBXFheqJKCiMPRAPCrgJUEYTECAADsYyVBkiRB7EYSBABcJt7rzTAU8F4SxNp4IAsC+A6tsAC4gbm8Yi0CAADsZM4EicVJgtiNJAgAuEzvdlheTIKY7bA4fQn4z2v7skmQySRBADgnQDssAADggAiVII4hCQIALhNP9mqH5cEkSE87LHYeAD9p7YrrnboWSdJlVIIAcFCAtQgAAHBANBSURCWIE0iCAIDLmJUg4aBhDe70kp52WA4HAsBWWw6cUCotTRhZoPLiPKfDAeBjVjssciAAAMBGkWC2HRaVILYjCQIALmPOBPFiFYjEMFLAr15nHggAlzDXImnWIgAAwEZWO6wESRC7kQQBAJdJmJUgAW++RAeYCQL4kjkPZAHzQAA4zCy0TVIKAgAAbBQ1B6Mnkg5H4j/e3GEDgByWzPaJCga9XQmSZN8B8I2ueFJv1DZLYig6AOdRlQoAAJxAJYhzSIIAgMuYg9FDHq0EMdt80YIC8I9tNU3qTqY0pjCqSaMKnA4HgM/1rEUcDgQAAPgKSRDneHOHDQByWCLZMxjdiwzaYQG+s2nvMUlS1XmjZBjefG0DkDtozQkAAJwQDQUlSTGSILYjCQIALpNI+WMwepL3fMA3Nr13VJJ0+fmjHI4EAGQlYxkJAgAA7BSlEsQxJEEAwGWswehBb75E0w4L8JeO7oSqa5skSVUkQQC4ADNBAACAExiM7hxv7rABQA5LWDNBvFkJYnbCSXL8EvCFzftPKJ5Ma1xJviaMZB4IAOdZ7bBYiwAAABuZM0FSaSlBewxbkQQBAJfxSzssTl/iXDz
44IOaNGmS8vLytGDBAr322munffwDDzygadOmKT8/X5WVlbrzzjvV1dVlU7T+Zs4D+QDzQAC4RIB2WAAAwAFmEkSSukmC2IokCAC4TM9gdG++RPe0w3I4EOSsxx57TCtXrtTdd9+trVu3avbs2Vq0aJGOHDnS7+MfeeQRffWrX9Xdd9+td955Rw899JAee+wxfe1rX7M5cn96+b1MEoR5IADcggMZAADACZFe+zyxOEkQO3lzhw0Acpg5E8S7lSCZr0k2HnCW7r//ft16661atmyZZsyYoTVr1qigoEAPP/xwv49/+eWXdcUVV+imm27SpEmTdM011+gzn/nM+1aP4Ny1dMW1/WCTJOaBAHCPQPZTMJUgAADATqFgwNrroRLEXiRBAMBlzL6Q4aBXkyCcvsTZ6+7u1pYtW7Rw4ULrvkAgoIULF2rTpk39XnP55Zdry5YtVtJj7969evbZZ3Xddded8vfEYjG1tLT0uWHgXt93XKm0NGlUgcaW5DsdDgBI6lmLpFmLAAAAm5nVIN0JkiB2CjkdAACgr3jKHIzuzTy1lQTh/R5n4ejRo0omkyorK+tzf1lZmXbu3NnvNTfddJOOHj2qD37wg0qn00okErrttttO2w5r9erVuueeewY1dj8yW2FVnT/a4UgAoAdVqQAAwCnRcECd8aRiiaTTofiKN3fYACCHJbPZgRCVIMCg2LBhg+6991794Ac/0NatW/X444/rmWee0Xe+851TXrNq1So1Nzdbt9raWhsj9o5NVhKEVlgA3KPnQAZrEQAAYC+zEiRGJYitqAQBAJeJJ81KEI8mQaw+3Gw8YOBGjx6tYDCohoaGPvc3NDSovLy832u++c1v6vOf/7xuueUWSdLFF1+s9vZ2/d3f/Z2+/vWvK9BP1VU0GlU0Gh38J+AjJ9q79XZdpo1Y1XkkQQC4R087LIcDAQAAvhMNkwRxApUgAOAySbMdVtCbL9E9lSAOB4KcFIlENG/ePK1fv966L5VKaf369aqqqur3mo6OjpMSHcFgUBL94IfSK3szVSAXlA7XmEISSgDcg6pUAADgFGaCOINKEABwGXMwumcrQWhBgXO0cuVK3XzzzZo/f74uu+wyPfDAA2pvb9eyZcskSUuXLtW4ceO0evVqSdKSJUt0//33a+7cuVqwYIH27Nmjb37zm1qyZImVDMHg25RNglxOKywALpNdinAgAwAA2C4SynwGJQliL5IgAOAyVjssj1aCBGmHhXN04403qrGxUXfddZfq6+s1Z84crVu3zhqWXlNT06fy4xvf+IYMw9A3vvENHTp0SGPGjNGSJUv03e9+16mn4AsMRQfgVoHsQRMGowMAALtFQ7TDcgJJEABwGasdlkcrQYzs8cskxy9xDlasWKEVK1b0+7MNGzb0+T4UCunuu+/W3XffbUNkkKQjrV3ac6RNhiF94LyRTocDAH0ErZkgrEUAAIC9IiHaYTnBm8eMASCHxVPebocVZBgp4Hkv78lUgUwvL1JJQcThaACgrwDtsAAAgEPMSpDuZNLhSPyFJAgAuEzS4+2wGEYKeN+GXUckSR+ZNsbhSADgZAZrEQAA4BCrHVacShA7eXOHDQByWNzz7bAyX+nDDXhTKpXWxnePSpI+MpUkCAD3MZdY6TQtsQAAgL2sdlhJkiB2IgkCAC6TyL4RhoLeTIIEA+bpS4cDATAkth9q1vH2bg2PhjRv4ginwwGAk5hVqRLrEQAAYK9oKCiJShC7kQQBAJcxB4aHPd4Oi5OXgDdt2NUoSbpiyijPvo4ByG2BQO8kCOsRDMzq1at16aWXqrCwUKWlpbrhhhu0a9eu973uV7/6lS688ELl5eXp4osv1rPPPmtDtAAAt4kEqQRxAp9MAcBl4tmZIEGPtsMyn1aSo5eAJ724OzMP5MpppQ5HAgD9673EIgmCgXrxxRe1fPlyvfLKK3r++ecVj8d1zTXXqL29/ZTXvPzyy/rMZz6jL37xi9q2bZtuuOEG3XDDDdqxY4eNkQMA3MBshxVLkASxU8jpAAAAfSVSmTfCsFeTILTDAjyrqaNb1bVNkpgHAsC9erfDIgeCgVq3bl2f79euXavS0lJt2bJFH/7wh/u95vvf/76uvfZaffnLX5Ykfec739Hzzz+vf//3f9eaNWuGPGYAgHtYg9ETSYcj8RcqQQDAZRIpsxLEmy/RffpwkwkBPOXF3Y1KpaWpZcM1tiTf6XAAoF99Z4KwFsG5aW5uliSNHDnylI/ZtGmTFi5c2Oe+RYsWadOmTae8JhaLqaWlpc8NAJD7rMHoVILYyps7bACQw7w+GJ0WFIB3/eHtBknSRy8sczgSADi1XjkQ2nPinKRSKX3pS1/SFVdcoZkzZ57ycfX19Sor6/veWFZWpvr6+lNes3r1ahUXF1u3ysrKQYsbAOAcs/U5h0LtRRIEAFwmYQ1G92gSpM8wUgcDATCouuJJ/WlnZh7I4pnlDkcDAKfWtxLEwUCQ85YvX64dO3bo0UcfHfS/e9WqVWpubrZutbW1g/47AAD2M9chSQ6F2oqZIADgMomkj9ph8aYPeMZ/v3tUHd1JVRTnadb4YqfDAYBTCgZ6zwRhLYKzs2LFCj399NPauHGjxo8ff9rHlpeXq6Ghoc99DQ0NKi8/9aGBaDSqaDQ6KLECANzDXIck6YZlq7PaYXvwwQc1adIk5eXlacGCBXrttddO+/gHHnhA06ZNU35+viorK3XnnXeqq6vrrAIGAK+zBqN7tBIkSBIE8KR1OzItPRZdVC7D8ObrFwBv6Nua07k4kJvS6bRWrFih3/72t3rhhRc0efLk972mqqpK69ev73Pf888/r6qqqqEKEwDgUrTDcsaAkyCPPfaYVq5cqbvvvltbt27V7NmztWjRIh05cqTfxz/yyCP66le/qrvvvlvvvPOOHnroIT322GP62te+ds7BA4AX9VSCeHMT0WDjAfCceDKlP76TOeF6La2wALicwYEMnIPly5fr5z//uR555BEVFhaqvr5e9fX16uzstB6zdOlSrVq1yvr+jjvu0Lp163Tfffdp586d+ta3vqXNmzdrxYoVTjwFAICDaIfljAEnQe6//37deuutWrZsmWbMmKE1a9aooKBADz/8cL+Pf/nll3XFFVfopptu0qRJk3TNNdfoM5/5zPtWjwCAX1kzQXzQDothpIA3vLr3uJo74xo1LKJLJ410OhwAeF/mWRNOYWKgfvjDH6q5uVlXXnmlKioqrNtjjz1mPaampkZ1dXXW95dffrkeeeQR/fjHP9bs2bP161//Wk888cRph6kDALwpmN3qYQ1irwHNBOnu7taWLVv6nGgIBAJauHChNm3a1O81l19+uX7+85/rtdde02WXXaa9e/fq2Wef1ec///lT/p5YLKZYLGZ939LSMpAwASCnxbONIUNebYdFH27Acx7fdlCSdM1FZZ6tYgPgLQHDUCqdpioVA3Ym69cNGzacdN+nPvUpfepTnxqCiAAAuYRKEGcMKAly9OhRJZNJlZWV9bm/rKxMO3fu7Peam266SUePHtUHP/hBpdNpJRIJ3X
bbbadth7V69Wrdc889AwkNADzDrI7w6kYifbgBb2nujOvZ7ZnTrn8zr9LhaADgzAQChpRK0w4LAADYqmcwOmsQOw15r5UNGzbo3nvv1Q9+8ANt3bpVjz/+uJ555hl95zvfOeU1q1atUnNzs3Wrra0d6jABwDXMmSDhoDfbYRm0wwI85XdvHFZXPKWpZcN1yYQSp8MBgDNitcMiCQIAAGxEEsQZA6oEGT16tILBoBoaGvrc39DQoPLy/odgfvOb39TnP/953XLLLZKkiy++WO3t7fq7v/s7ff3rX1egn5730WhU0Wh0IKEBgGckUtl2WB6tBJEyb/rJVJp2WIAHPPpajSTpxksn9ElyAoCbma0oWIoAAAA7kQRxxoCOGUciEc2bN0/r16+37kulUlq/fr2qqqr6vaajo+OkREcwGJREL3gA6I85GN2rM0GkntOX9MAEctuOQ81663CLIsGA/nruOKfDAYAzZiZBqAQBAAB2CrIGccSAKkEkaeXKlbr55ps1f/58XXbZZXrggQfU3t6uZcuWSZKWLl2qcePGafXq1ZKkJUuW6P7779fcuXO1YMEC7dmzR9/85je1ZMkSKxkCAOgRz7bDCvVTKecVmdPiDCMFct1P/3uvJGnRzHKNGBZxOBoAOHNm4RqnMAEAgJ0CVII4YsBJkBtvvFGNjY266667VF9frzlz5mjdunXWsPSampo+lR/f+MY3ZBiGvvGNb+jQoUMaM2aMlixZou9+97uD9ywAwEOSfmiHZZ584E0fyFlvHW7Wk28cliT93YfOczgaABgYsxUFSxEAAGAncz8kyRrEVgNOgkjSihUrtGLFin5/tmHDhr6/IBTS3XffrbvvvvtsfhUA+I45GD3k0cHoEsNIAS/4p9/vVDot/dXssbp4fLHT4QDAgPTMBGEtAgAA7GMdxOAkhq28u8MGADkqblaCeHkmCKcvgZz23+826r/fPapw0NA/XDPN6XAAYMB6DmQ4GwcAAPAX2mE5gyQIALhM0poJ4uEkCIPAgJx14Fi77nysWpL02QUTNWFUgbMBAcBZMFiLAAAAB/S0w2INYieSIADgMvGU9wejW6cvOfkA5JQjrV36/EOv6Whbt2ZUFOl/XTPV6ZAA4KwEGIwOAAAcYHY+Zz/EXmc1EwQAMHTMD+NhD7fDYhgpkFuOtHbpl6/X6scb96qlK6HKkfla+4VLVZgXdjo0ADgrQWsmiMOBAAAAXwlQCeIIkiAA4DLxZGYmSNDD7bBoQQG/e+qNw/rTriM9d6T7/WPm+7/47yTd52c65c/+8tqT/ms76dq+d3TFU2rpjKv2RIcaWmLW/ReWF2rN5+aptDDvL/9GAMgZrEUAAIATGIzuDJIgAOAyiaRZCeL9dli0oIBf7TjUrMe3HnI6jDNmGNL08iL9vx85T5+YNdbTSVoA/mB2HSUJAgAA7GQNRmcNYiuSIADgMmZiwMubjLSggN9ddWGpRg2P9LnPUN//5o33eQkw/uIBf/nwv7z+5J+f/At63xUJBlScH1ZpUVQXlhdpWJRlIwDvCBi05gQAAPazBqOnHA7EZ/g0CwAuE09l3glDHp4JYtADEz73gfNG6QPnjXI6DADwrQDtsAAAgANoh+UM7/ZaAYAclEqlreqIcMC7L9G0oAAAAE4yC27ZgAAAAHZiMLozvLvDBgA5yKwCkaSghytBetph8aYPAADsRzssAADgBCpBnEESBABcxByKLnm8EoQemAAAwEEBDmQAAAAHBLNbPVSC2Mu7O2wAkIMSvU4CeHkmSCBAH24AAOAcw2yHxVIEAADYqOdQKIsQO5EEAQAXSfQqjQgFPJwEsTYeeNMHAAD2ox83AABwgtkOiySIvUiCAICLmJUgwYAhw/ByEsTsgelwIAAAwJeCVKUCAAAHkARxBkkQAHCR3kkQL+sZRsqbPgAAsJ+51GImCAAAsBMHMZxBEgQAXMRshxX2ehIk++7Dmz4AAHCCQVUqAABwQJCZII4gCQIALmJWgoSC3n55phIEAAA4iflkAADACQHaYTnC27tsAJBjEslsEsTrlSCcvgQAAA7iQAYAAHBC0FqDOByIz5AEAQAXiWfbYYWCXk+CZL4m2XgAAAAOCATYgAAAAPZjMLozSIIAgIuYb4KhgLdfns03fYaRAgAAJ9AOCwAAOMFqh8UaxFbe3mUDgByTSPmjEsSg/BMAADgowFoEAAA4wGqHxSLEViRBAMBF4r6ZCZL5SvknAABwgpkEoSoVAADYyWz8QSWIvUiCAICL+K0dFi0oAACAEwwOZAAAAAcErYMYHMawk7d32QAgx/hnMHrPmz4AAIDdaIcFAACcEOzV+YPDGPYhCQIALmJVggS9/fJszgThDR8AADiBqlQAAOCEQO8kCOsQ23h7lw0AcoxfZoKYhS5sPAAAACeYSy3aUAAAADuZ7bAkKZVyMBCfIQkCAC6SyL4Dej0J0tOCgo0HAABgP4N2WAAAwAFBKkEcQRIEAFykpx2Wt5MgbDwAAAAnBahKBQAADggYzARxAkkQAHCRnnZY3n55NkeesPEAAACcYFWlsvkAAABs1LsShHWIfby9ywYAOSaRzLTDCnu8EoSNBwAA4KRAgKpUAABgv97dz2mHZR+SIADgIonsJ/Gg12eCsPEAAAAcxHwyAADgBMMwrEQI7bDsQxIEAFzErAQJBb398szGAwAAcFLPTBBn4wAAAP5jtkAnCWIfb++yAUCOMStBwl6vBOHUAwAAcJB5ICPNgQwAAGAzcwwseyL2IQkCAC7S0w7L2y/PQWvjweFAAACALxkcyAAAAA4J0h3Ddt7eZQOAHOOXwegGb/g4Rw8++KAmTZqkvLw8LViwQK+99tppH9/U1KTly5eroqJC0WhUU6dO1bPPPmtTtAAAt+nZfHA4EAAA4DvmnFQOY9gn5HQAAIAevhmMbp6+JAmCs/DYY49p5cqVWrNmjRYsWKAHHnhAixYt0q5du1RaWnrS47u7u3X11VertLRUv/71rzVu3DgdOHBAJSUl9gcPAHAF5pMBAACnmHs+rEPsQxIEAFwkkczOBPH4YHTzDZ/3e5yN+++/X7feequWLVsmSVqzZo2eeeYZPfzww/rqV7960uMffvhhHT9+XC+//LLC4bAkadKkSXaGDABwGbPzKDNBAACA3cyK1GwzENjA27tsAJBjzEqQkMcrQQyD0k+cne7ubm3ZskULFy607gsEAlq4cKE2bdrU7zW/+93vVFVVpeXLl6usrEwzZ87Uvffeq2QyecrfE4vF1NLS0ucGAPAOg3ZYAADAIbTDsh9JEABwEXMmSNDjM0HMHA+lnxioo0ePKplMqqysrM/9ZWVlqq+v7/eavXv36te//rWSyaSeffZZffOb39R9992n//2///cpf8/q1atVXFxs3SorKwf1eQAAnBVgMDoAAHAIg9HtRxIEAFzErAQJB7z98tzT/9LhQOALqVRKpaWl+vGPf6x58+bpxhtv1Ne//nWtWbPmlNesWrVKzc3N1q22ttbGiAEAQ83cfKAdFgAAsFuQShDbMRMEAFwkkcpUgoQ8XwmST
YLwho8BGj16tILBoBoaGvrc39DQoPLy8n6vqaioUDgcVjAYtO6bPn266uvr1d3drUgkctI10WhU0Wh0cIMHALgG7bAAAIBTzHOvSQ5j2MbbR40BIMeYg9G9PhPETILwho+BikQimjdvntavX2/dl0qltH79elVVVfV7zRVXXKE9e/YoleqZOrd7925VVFT0mwABAHhfgDYUAADAIUEOhtqOJAgAuEjcTIIEvf3ybFa6UPqJs7Fy5Ur95Cc/0c9+9jO98847uv3229Xe3q5ly5ZJkpYuXapVq1ZZj7/99tt1/Phx3XHHHdq9e7eeeeYZ3XvvvVq+fLlTTwEA4LCe+WTOxgEAAPyHwej2ox0WALhI0myH5fFKEPP5xZOp93kkcLIbb7xRjY2Nuuuuu1RfX685c+Zo3bp11rD0mpoaBXrN1amsrNRzzz2nO++8U7NmzdK4ceN0xx136B//8R+degoAAIcFAlSCAAAAZwTpjmE7kiAA4CLxlD/aYZmVLmb7L2CgVqxYoRUrVvT7sw0bNpx0X1VVlV555ZUhjgoAkCuYTwYAAJxiDkZPcS7UNt7utwIAOSbpk3ZY4ewbfoJ3fAAA4ADaYQEAAKcwJ9V+3t5lA4Ack/BJO6xwKPP2053gDR8AANiPwegAAMApPZUgrEPsQhIEAFzEN4PRqQQBAAAOMs+bpEmCAAAAmzEY3X7e3mUDgByT9MlMkDAzQQAAgIMM2lAAAACHBLNbPgmSILYhCQIALhJPZtthBb2dBDGfn/l8AQAA7BS0TmA6HAgAAPCdUCCzJU9bTvuQBAEAF0lYlSDefnk2K0FIggAAACfQixsAADjF3PKhHZZ9vL3LBgA5JuGbdljmTBDe8AEAgP0CtMMCAAAOsQ5jsA6xDUkQAHCRhF/aYQWoBAEAAM7JFqVSCQIAAGxnHcZgHWIbkiAA4CLmG6DZLsqrGIwOAACcRCUIAABwSs9sMtYhdvH2LhsA5BizMiLok3ZYVIIAAAAnmK1Hac0JAADsFjRoh2U3kiAA4CIJqxLE20mQkDUYnTd8AABgPwajAwAApwSsShCHA/ERkiAA4CJme6hgwNsvz2Hr9CXv+AAAwH4B2lAAAACHBGnLaTtv77IBQI4x20NFvD4TJEQlCAAAcA5tKAAAgFOoSLWft3fZACDHWEmQkMfbYQWYCQIAAJxDJQgAAHAK6xD7kQQBABcxKyNCXm+Hla10SVAJAgAAHNDThsLhQAAAgO+YY2CpSLWPt3fZACDHdGcrI8x2UV4VCjITBAAAOIc2FAAAwClUgtjP27tsAJBjEmYSJOjtdlhmJUh3giQIAACwH5sPAADAKQxGtx9JEABwiWQqLfNzuOcHo2fbfSXYeAAAAA5g8wEAADiFilT7eXuXDQBySO8h4SGPJ0Gsdlg04gYAAA4IUgkCAAAc0lOR6nAgPuLtXTYAyCHdvd79vN4Oy0yCxFMppTmBCQAAbEYSBAAAOIWKVPuRBAEAl+hdFWG2i/Iqs91XOs3mAwAAsJ9ZdJti8wEAANiMdlj28/YuGwDkELMdVihgWKWRXtW73RdzQQAAgN0CBpUgAADAGeY6hP0Q+5AEAQCX6E5kkiBhj88DkTKJHlOcJpgAAMBmtMMCAABOMVuEU5FqH+/vtAFAjrAqQTw+D0Tqm+hhODoAALCb2YubzQcAAGA3KlLtRxIEAFwink0GRHxQCRIMGDKLQagEAQAAdgtQCQIAABxibvuwDrGP93faACBHmMkAP7TDknrmgsR50wcAADazBpKyDAEAADajItV+/thpA4AcYCVBQt5vhyVJ4ezmQ4JKEAAAYDNmguBcbNy4UUuWLNHYsWNlGIaeeOKJ0z5+w4YNMgzjpFt9fb09AQMAXIWKVPuRBAEAlzDbYYUD/nhptipBmAkCAABsFqQXN85Be3u7Zs+erQcffHBA1+3atUt1dXXWrbS0dIgiBAC4GZUg9gs5HQAAIMNv7bDCVhKEShAAAGAvKkFwLhYvXqzFixcP+LrS0lKVlJQMfkAAgJxCJYj9/LHTBgA5wHftsIJmOyze9AEAgL0CZiUIJzBhozlz5qiiokJXX321/vznP5/2sbFYTC0tLX1uAABv6DmM4XAgPkISBABcwmqH5ZNKkFA2CRJP8a4PAADsZQ1G5wQmbFBRUaE1a9boN7/5jX7zm9+osrJSV155pbZu3XrKa1avXq3i4mLrVllZaWPEAIChRDss+9EOCwBcwnftsLKzT+IJkiAAAMBe5nKLShDYYdq0aZo2bZr1/eWXX6733ntP3/ve9/R//+//7feaVatWaeXKldb3LS0tJEIAwCNoh2U/kiAA4BI9SRC/tMPK7D4keNMHAAA2CzAYHQ677LLL9NJLL53y59FoVNFo1MaIAAB2Mbd9OIxhH38cNwaAHNCd8FcliNUOiyaYAADAZrTDgtOqq6tVUVHhdBgAAAewDrEflSAA4BJmRYR/kiDZShAGowMAAJsxGB3noq2tTXv27LG+37dvn6qrqzVy5EhNmDBBq1at0qFDh/Sf//mfkqQHHnhAkydP1kUXXaSuri799Kc/1QsvvKA//OEPTj0FAICDaIdlP5IgAOASZkVExCdJkHCAShAAAOAMsyI1xTIEZ2Hz5s266qqrrO/N2R0333yz1q5dq7q6OtXU1Fg/7+7u1v/6X/9Lhw4dUkFBgWbNmqU//vGPff4OAIB/MBjdfiRBAMAlzHZYIZ/NBIlz8gEAANjM3HxIkAXBWbjyyiuVPs3G1dq1a/t8/5WvfEVf+cpXhjgqAECuoBLEfv44bgwAOSCe9Fs7rOzmA5UgAADAZubmQyqt025mAwAADLag1ZbT4UB8xB87bQCQA8y2UH5JgoSZCQIAABxibj5ImUQIAACAXRiMbj9/7LQBQA5IWDNB/NEOK5R90++mEgQAANjMrASRaEUBAADsZa5DaMtpH5IgAOAS3T5rhxUOmZUgvOkDAAB7BQO9K0FIggAAAPtYg9HZDrGNP3baACAHmO2wQn5JglgnH9h4AAAA9urdDotKEAAAYCfzMEaSgxi28cdOGwDkgLjf2mFlkz1xZoIAAACbBXp9EmYDAgAA2MlKgnAQwzYkQQDAJeJ+a4eVTfbEaYcFAABs1mcwOhsQAADARua2Dy057eOPnTYAyAFmMsCcleF1ZrKHmSAAAMBuvWeC0JoTAADYKWBQCWI3f+y0AUAOsGaCBHzSDivbhyLOmz4AALCZYRgyl1xUggAAADvRDst+JEEAwCWsmSC+qQTJDkanEgQAADiAoaQAAMAJZltO2mHZxx87bQCQA7oTfpsJwmB0AADgHFpRAAAAJwSoBLGdP3baACAHJFLZmSA+SYKEGIwOAAAcZFaCpFiKAAAAG1lrEHIgtvHHThsA5ABrMHrQHzNBegaj864PAADsZ7aioB0WAACwE9Wo9iMJAgAuEfdZOyxzAHyc45cAAMABtKIAAABOYDC6/c5qp+3BBx/UpEmTlJeXpwULFui111477eObmpq0fPlyVVRUKBqNaurUqXr22WfPKmAA8KrupL/aYTETBAAAOKmn
FQVrEQAAYB8Go9svNNALHnvsMa1cuVJr1qzRggUL9MADD2jRokXatWuXSktLT3p8d3e3rr76apWWlurXv/61xo0bpwMHDqikpGQw4gcAz+iZCeKXdliZ55lgJggAAHAArSgAAIATAtmzr6xB7DPgJMj999+vW2+9VcuWLZMkrVmzRs8884wefvhhffWrXz3p8Q8//LCOHz+ul19+WeFwWJI0adKkc4saADzIbIcV8UklSIhKEAAA4KAQrSgAAIADqEa134B22rq7u7VlyxYtXLiw5y8IBLRw4UJt2rSp32t+97vfqaqqSsuXL1dZWZlmzpype++9V8lk8twiBwCPMQejh/ySBDFnglAJAgAAHEA/bgAA4IQg1ai2G1AlyNGjR5VMJlVWVtbn/rKyMu3cubPfa/bu3asXXnhBn/3sZ/Xss89qz549+vu//3vF43Hdfffd/V4Ti8UUi8Ws71taWgYSJgDkpJ6ZIP5ohxUJZZI9CQajAwAAB1itKDiFCQAAbBTgIIbthvy4cSqVUmlpqX784x9r3rx5uvHGG/X1r39da9asOeU1q1evVnFxsXWrrKwc6jABwHFxnw1GDwVoh4Wz9+CDD2rSpEnKy8vTggUL9Nprr53RdY8++qgMw9ANN9wwtAECAFzPGkrKBgQAALBRz2B0hwPxkQHttI0ePVrBYFANDQ197m9oaFB5eXm/11RUVGjq1KkKBoPWfdOnT1d9fb26u7v7vWbVqlVqbm62brW1tQMJEwByUiKbDDArJLwuxGB0nKXHHntMK1eu1N13362tW7dq9uzZWrRokY4cOXLa6/bv369/+Id/0Ic+9CGbIgUAuBmnMAEAgBNoyWm/Ae20RSIRzZs3T+vXr7fuS6VSWr9+vaqqqvq95oorrtCePXuU6tXuZPfu3aqoqFAkEun3mmg0qqKioj43APC6bp9Vgphtv6gEwUDdf//9uvXWW7Vs2TLNmDFDa9asUUFBgR5++OFTXpNMJvXZz35W99xzj8477zwbowUAuJXVj5t2WAAAwEYcxLDfgHfaVq5cqZ/85Cf62c9+pnfeeUe333672tvbtWzZMknS0qVLtWrVKuvxt99+u44fP6477rhDu3fv1jPPPKN7771Xy5cvH7xnAQAeYA1GD/hjJoiZ7GEwOgaiu7tbW7Zs0cKFC637AoGAFi5cqE2bNp3yum9/+9sqLS3VF7/4RTvCBADkAPMUJuPJAACAncx9Hw5i2GdAg9El6cYbb1RjY6Puuusu1dfXa86cOVq3bp01LL2mpkaBQE9upbKyUs8995zuvPNOzZo1S+PGjdMdd9yhf/zHfxy8ZwEAHhD3WzusgDkYnTd9nLmjR48qmUxa6w5TWVmZdu7c2e81L730kh566CFVV1ef8e+JxWKKxWLW9y0tLWcVLwDAvQJUggAAAAdYaxD2Q2wz4CSIJK1YsUIrVqzo92cbNmw46b6qqiq98sorZ/OrAMAXUqm09ebnt3ZYzATBUGptbdXnP/95/eQnP9Ho0aPP+LrVq1frnnvuGcLIAABO66kEYQMCAADYJ9irA0gqlbbaY2HonFUSBAAwuOK9+jCYyQGvC1ntsNh4wJkbPXq0gsGgGhoa+tzf0NCg8vLykx7/3nvvaf/+/VqyZIl1nzmnLBQKadeuXTr//PNPum7VqlVauXKl9X1LS4sqKysH62kAAFzA3ICgKhUAANjJnEsmZSpSA/LHPpCTSIIAgAv0TgT4rRKEmSAYiEgkonnz5mn9+vW64YYbJGWSGuvXr++3SvXCCy/U9u3b+9z3jW98Q62trfr+979/ysRGNBpVNBod9PgBAO4RZCgpAABwQK9JEkqm0goHnYvFL0iCAIALxBO9K0H8kgRhJgjOzsqVK3XzzTdr/vz5uuyyy/TAAw+ovb1dy5YtkyQtXbpU48aN0+rVq5WXl6eZM2f2ub6kpESSTrofAOAv5inMFDNBAACAjfq0w2IdYguSIADgAmY1RMDo+2boZaEAlSA4OzfeeKMaGxt11113qb6+XnPmzNG6deusYek1NTUKBPyRTAQAnD3zrYJKEAAAYKdA73ZYrENsQRIEAFwg7rOh6FLPcyUJgrOxYsWKfttfSdKGDRtOe+3atWsHPyAAQM6xBqNzAhMAANio72B0BwPxEf/stgGAi5ntsCI+TIIkGIwOAAAcYJ7C5AQmAACw018ORsfQ889uGwC4mFkNEQr6oxWW1PNcE6m00rzpAwAAmzEYHQAAOCEQoB2W3UiCAIALdGeTIL5qh9VrZgPD0QEAgN0YjA4AAJxCW057+We3DQBcLJ7030yQ3lUvzAUBAAB2C1iVIA4HAgAAfCdIW05b+We3DQBcLJH99B0J+edluXfCJ85cEAAAYLOQlQQhCwIAAOxlNscgCWIP/+y2AYCL9bTD8s9MkN7PNcERTAAAYLMAM0EAAIBDaMtpL5IgAOACZiVEKOCfl2XDMKwemMwEAQAAdrPaULAMAQAANuMwhr38s9sGAC4WT2QrQXzUDkvqaUPRnaASBAAA2MsaSMrmAwAAsBmD0e3lr902AHCpRLYXdcRH7bAkKZKdC0IlCAAAsFvAqgRhHQIAAOzVMxjd4UB8giQIALhAd7YPQ+9h4X4QyiZ9mAkCAADsFmQgKQAAcEjAag/Ofogd/LXbBgAuZbbDCvkuCZJ5vt0kQQAAgM1ohwUAAJwSstYhDgfiE/7abQMAl4on/dkOK2yefGAiKQAAsBntsAAAgFNYh9iLJAgAuICZBPFbOyxzEDzlnwAAwG5UggAAAKeY6xDactrDX7ttAOBScb/OBMm+6cepBAEAADYLWr24WYcAAAB7WYcxqASxhb922wDApXxbCZJ9vnFmggAAAJsFaUMBAAAcks2BUAliE3/ttgGAS/UkQfw1EyQUZCYIAABwBu2wAACAU1iH2IskCAC4QLdP22FRCQIAAJwSsHpxOxwIAADwHQaj28tfu20A4FIJv7bDCpiD0XnTBwAA9jLbYdGLGwAA2I3B6Pby124bALiU1Q4r5M92WFSCAAAAuwXYfAAAAA5hMLq9SIIAgAvEzXZYAX+9LIesdli86QMAAHsxGB0AADjFaofFmVBb+Gu3DQBcqtun7bAi1mB03vUBAIC9zGUXA0kBAIDdaIdlL3/ttgGAS8UTPm2Hla18ifOmDwAAbEY7LAAA4BRmk9mLJAgAuIA5GDzis0oQayZIgkoQAABgrxBJEAAA4BCzGzrrEHv4a7cNAFzKv+2wMs83kSIJAgAA7BVgJggAAHAIg9Ht5a/dNgBwKbMSwqyM8AurEoTB6AAAwGb04gYAAE7pGYzOOsQOJEEAwAXiPq0ECZmVICRBAACAzTiBCQAAnMJhDHv5a7cNAFzKrzNBwgGzEoR2WAAAwF6cwAQAAE5hMLq9/LXbBgAu1Z3wZyWI+XzjzAQBAAA26zmB6XAgAADAdwLZdUiCwxi28NduGwC4lFkJ4b+ZILTDAgAAzuAEJgAAcErIbMtJEsQWJEEAwAXMweC+a4cVpB0WAABwRoBe3AAAwCGsQ+zlr902AHAp3w5GD2T
bYVEJAgAAbGYuu9h8AAAAdjMrUtkOsYe/dtsAwKV6kiD+aodVnB+SJDW2xhyOBAAA+E0wexiDJAgAALBbkHZYtiIJAgAuYFZChEP+elmeMbZYkrTjULPDkQAAAL/pOYHJ5gMAALBXgHWIrfy12wYALmVVggT89bJ80dgiGYZU39KlI61dTocDAAB8xGyHxQlMAABgN9py2stfu20A4FJWEiTkr3ZYw6IhnT9muCSqQQAAgL04gQkAAJxCOyx7kQQBABfoTvhzMLokzRqXaYm1/WCLw5EAAAA/YfMBAAA4hcMY9vLfbhsAuJA5EyTiwyTITDMJcqjJ2UCA/7+9e4+Pqr7zP/6eM5OZSSQJYEgCGOWmIoKAIDEopXazTatF7U1WLVCqWBfptmbbKtqC1dZQq5ZdxaIgpRcp6C5eVvlhEcvDIlEqkJZyU+QuJBCFTMhtkpnz+yOZCRGiSTxzzmTm9Xw85iE5nJN85mvIfDKf8/18AABJxTB48wEAADiDmzHslXzvtgFAnAmHTdU1hiRJ/hS3w9HY75JzIkUQ2mEBAAD7RAejhx0OBAAAJB12gtiLIggAOKyhqfU37zRv8hVBhvXLkOGSKgINOhpgODoAALAHd2ACAACnRPIQbsawB0UQAHBYbbAp+ufUJNwJkub1aEh283B0doMAAAC7RO7AbArz7gMAALBX9GYMdoLYgiIIADisNtjcCsvnMaK9qZNNZC7IW3s+lEkCAAAAbOBxR958cDgQAACQdKLtsEhEbOFxOgAASHaReSDJ2AorYlReT63c/IEW/XWv1u48qvOze8gll1wuNT+if+7eRaKzvG7N+/olTocBAADEmw8AAMA57patCeQh9qAIAgAOqwtGiiDJ+yP5+tH9tfVQlV7+xxHtOVajPcdqnA4pJnqmpVAEAQAgTrT24ubNBwAAYC+3i3ZYdkred9wAIE5E2mGlJvFOkAx/in71zZGae+3Fen3nUQXqGmVKkmnKbP5PQiQGXg9dKAEAiBe8+QAAAJxicDOGrSiCAIDD6hqbB6Mn41D0j+vh8+jakf2cDgMAACQBgzYUAADAIdyMYS9uSQUAh9UFw5KSeycIAACA3SLtsHjzAQAA2C2yE6QpRB5iB4ogAOCw2mDzTpBkHowOAABgNzeD0QEAgEM8kXZY3IxhC4ogAOCwusaWmSC0wwIAALANvbgBAIBTojtSyUNsQREEABxWx2B0AAAA27ETBF31xhtvaNKkSerXr59cLpdeeOGFT71m3bp1uvTSS+Xz+TRkyBAtXbo05nECAOJXZCdII3mILSiCAIDDaluKILTDAgAAsI+bNhToopqaGo0cOVILFizo0Pl79+7VNddco6uuukplZWX6wQ9+oFtvvVWvvvpqjCMFAMQrr6f5PaBgU9jhSJKDx+kAACDZ0Q4LAADAfq1tKBwOBN3Ol7/8ZX35y1/u8PkLFy7UwIED9cgjj0iSLrroIq1fv16//vWvVVRUFKswAQBxzOtp3pvQQBHEFuwEAQCHtbbDoi4NAABgF3aCwC6lpaUqLCxsc6yoqEilpaXtXtPQ0KBAINDmAQBIHL6WIkiwKeRwJMmBIggAOIx2WAAAAPYzmAkCm5SXlysnJ6fNsZycHAUCAdXV1Z3xmpKSEmVmZkYfeXl5doQKALCJN1oEYSeIHSiCAIDD6hqbJNEOCwAAwE6RnSCSFKYQgjgze/ZsVVVVRR8HDx50OiQAgIWiRZAQRRA70HsFABxWG22HRREEAADALm5XaxEkZJoy5PqEs4Guy83NVUVFRZtjFRUVysjIUGpq6hmv8fl88vl8doQHAHBApB1WQyNFEDuwEwQAHFZHOywAAADbGaf8NkxLLMRSQUGB1q5d2+bYmjVrVFBQ4FBEAACn+dgJYiuKIADgsLpGiiAAAAB2a9MOi+Ho6ISTJ0+qrKxMZWVlkqS9e/eqrKxMBw4ckNTcymrq1KnR82+//Xbt2bNHP/7xj7Vz50498cQTevbZZ3XnnXc6ET4AIA543c3vATETxB4UQQDAYZF2WH5mggAdtmDBAg0YMEB+v1/5+fnauHFju+cuWrRIEyZMUK9evdSrVy8VFhZ+4vkAgORgnNIOq4mdIOiEd955R6NHj9bo0aMlScXFxRo9erTmzJkjSTpy5Ei0ICJJAwcO1CuvvKI1a9Zo5MiReuSRR7R48WIVFRU5Ej8AwHmRmSANFEFswUwQAHBYazssfiQDHbFixQoVFxdr4cKFys/P1/z581VUVKRdu3YpOzv7tPPXrVunG2+8UePHj5ff79cvf/lLffGLX9S2bdvUv39/B54BACAeeBiMji76/Oc/L/MTdg8tXbr0jNds2bIlhlEBALqTaDssiiC2YCcIADiMdlhA5zz66KOaMWOGpk+frmHDhmnhwoVKS0vTkiVLznj+M888o5kzZ2rUqFEaOnSoFi9erHA4fFpvbgBAcjm1HRYzQQAAgJ28FEFsRREEABxWG2ySJKXSDgv4VMFgUJs2bVJhYWH0mGEYKiwsVGlpaYc+R21trRobG9W7d+92z2loaFAgEGjzAAAkFpfLpUhHrBAzQQAAgI28pwxG/6TdhbAGRRAAcFA4bKq+sbnqn8pOEOBTVVZWKhQKKScnp83xnJwclZeXd+hz3HXXXerXr1+bQsrHlZSUKDMzM/rIy8v7THEDAOKTu6UKEuYmTAAAYKNIOyyJuSB2oAgCAA6qbwpF/0w7LCD25s2bp+XLl+v555+X3+9v97zZs2erqqoq+jh48KCNUQIA7GK0tMRiJwgAALCT95QiSDBEESTWmMILAA6qDbYWQfweiiDAp8nKypLb7VZFRUWb4xUVFcrNzf3Eax9++GHNmzdPr732mi655JJPPNfn88nn833meAEA8a11JwhFEAAAYB+v+5QiCDtBYo6dIADgoLqWIog/xYjeiQigfV6vV2PGjGkz1Dwy5LygoKDd6x566CE98MADWr16tcaOHWtHqACAbiAyHJ3B6AAAwE4ulytaCKEdVuyxEwQAHFTX2FwESfPy4xjoqOLiYk2bNk1jx47VuHHjNH/+fNXU1Gj69OmSpKlTp6p///4qKSmRJP3yl7/UnDlztGzZMg0YMCA6O6RHjx7q0aOHY88DAOA8g8HoAADAIT6PoWAozE4QG/CuGwA4KNIOKzWFVlhAR02ePFnHjh3TnDlzVF5erlGjRmn16tXRYekHDhyQYbRudv3Nb36jYDCob3zjG20+z9y5c3XffffZGToAIM6wEwQAADjF6zGkBtph2YEiCAA4KNIOK5Wh6ECnzJo1S7NmzTrj361bt67Nx/v27Yt9QACAbsndUjSnCAIAAOwWGY5OEST2mAkCAA6qa2ySJKVRBAEAALBdZCYpRRAAAGA3nycyEyTkcCSJjyIIADiIdlgAAADOcbua22GFmQkCAABsxk4Q+yRlO6xgU7hDSW5LPtz2mE4/eObzPn7OGa7r6Nc800EACaGWdlgAAACOMZgJAgAAHBIpgjSEKILEWlIWQea+tE1/2njA6TA6xHBJaV6PMvwe5Wb6ldc7TZee20v5g3
...[remainder of base64-encoded PNG notebook output elided]...",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABkEAAAJtCAYAAACBs9diAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAC0F0lEQVR4nOzde3xU9b3v//daM5NJQm4EyAUIAnK/R1QEbasWRHRzatttPdpTWlvb0244P1t2211aL7UX6Tmtl+5dK7WtpXt3W2ut2osUVFqkFqyCRAEFRC4JkIRr7re5/f6YWZNEEiSQrLVmzev5eMwDMllDPoM4853v5/v5fIxYLBYTAAAAAAAAAACAx5hOBwAAAAAAAAAAADAQSIIAAAAAAAAAAABPIgkCAAAAAAAAAAA8iSQIAAAAAAAAAADwJJIgAAAAAAAAAADAk0iCAAAAAAAAAAAATyIJAgAAAAAAAAAAPIkkCAAAAAAAAAAA8CS/0wGcjWg0qiNHjig3N1eGYTgdDgAArhCLxdTY2Kjhw4fLNDnXMJBYiwAAcDrWIvZhLQIAwOnOdi2SEkmQI0eOqKyszOkwAABwpaqqKo0cOdLpMDyNtQgAAL1jLTLwWIsAANC791qLpEQSJDc3V1L8yeTl5TkcDQAA7tDQ0KCysrLk+yQGDmsRAABOx1rEPqxFAAA43dmuRVIiCWKVeubl5fFmDwDAu9ASYeCxFgEAoHesRQYeaxEAAHr3XmsRmnYCAAAAAAAAAABPIgkCAAAAAAAAAAA8iSQIAABIGRs3btTixYs1fPhwGYahZ5555j0f097erm984xu64IILFAwGNXr0aD366KMDHywAAAAAAHBcSswEAQAAkKTm5mbNnDlTn/70p/WRj3zkrB7zsY99TLW1tfr5z3+ucePGqbq6WtFodIAjBQAAAAAAbkASBAAApIxFixZp0aJFZ3392rVr9eKLL2rfvn0qLCyUJI0ePXqAogMAAAAAAG5DOywAAOBZf/jDH3TxxRfr//2//6cRI0ZowoQJ+vKXv6zW1tYzPq69vV0NDQ3dbgAAAAAAIPVQCQIAADxr3759eumll5SZmamnn35ax48f17/8y7/oxIkT+sUvftHr41auXKl77rnHxkgBAAAAAMBAoBIEAAB4VjQalWEY+u///m9deumluu6663T//ffrl7/85RmrQVasWKH6+vrkraqqysaoAQAAAABAf6ESBAAAeFZpaalGjBih/Pz85H2TJ09WLBbToUOHNH78+B4fFwwGFQwG7QoTAAAAAAAMECpBAACAZ11++eU6cuSImpqakvft2bNHpmlq5MiRDkYGAAAAAADsQBIEAACkjKamJlVUVKiiokKStH//flVUVKiyslJSvI3VkiVLktffcsstGjJkiG699Va9+eab2rhxo77yla/o05/+tLKyspx4CgAAAAAAwEYkQQAAQMrYsmWLysvLVV5eLklavny5ysvLddddd0mSqqurkwkRScrJydHzzz+vuro6XXzxxfr4xz+uxYsX69///d8diR8AAAAAANiLmSAAACBlXHnllYrFYr1+f/Xq1afdN2nSJD3//PMDGBUAAAAAAHArKkEAAAAAAAAAAIAnkQQBAAAAAAAAAACeRBIEAAAAAAAAAAB4EkkQAAAAAAAAAADgSSRBAAAAAAAAAACAJ5EEAQAAAAAAAAAAnkQSBAAAAAAAAAAAeBJJEAAAAAAAAAAA4El+pwMAAAAAvCgWi8kwDKfDAADgNNFoTLEe7u/pXevdb2W8twEAUg1JEABIM60dEf2+4rAee6VS+481q/yCwbpywjDdMmeUMgM+p8MDgJT2zLbD+sXf96vyZIsi0ZhunjNKn3vfWA3JCTodGgAAST/esFc/eG7PgP6MnnIl776rp4TKu+/x+wzlZQY0JCeomSPzdfHoQv3TjFI+uwAAzhpJEABII/UtIX3sJ5u1u7Yxed/GPce0cc8x/ebVKv3HLeWaUJzb5z9379FGvVZZp4MnmuU3TV0xfqjKywrk99F1EUD6eHnfCS1/okLRLkdrf/LiPv3X5oP6zg3T9JGLRjoXHAAANov1UGpy2l09XfQu4WhMbaF2HW1s11vVDXr81Sr9+/q3dec/TdH8yUVUpgAA3pMRi53FO47DGhoalJ+fr/r6euXl5TkdDgCkpPZwRJ/4+St6Zf9JDc0J6nPvH6M5Y4bo1QMnterFfTre1K6g39Sd/zRFH58z6qw+TLxWeUo//utevfDW0dO+N6IgS/992xyNHjpoIJ4OxPujnfi7xns52dyh6374N9U0tGnxzOH6lysv1OFTrfrh+re1/XC9JOkzV4zRikWTSBAD8Ix0fH98+OGH9fDDD+vAgQOSpKlTp+quu+7SokWLerx+9erVuvXWW7vdFwwG1dbW1qef299/122hiNpCkW73vXt3qKfNop62kHq+7t3XnE1GpOc/qyMcVUNbSIdOtWpbZZ2e3nZItQ3tkqQvzh+vL86f0MOjAADp4GzfH6kEAYA08bXfbdcr+08qN+jXr267VJNK4m8OM8sK9KFZI/Tl376uF/cc0x3P7NDf3j6m79wwXcNyT2/fEovFtOmdE3ror3u16Z0TkuKl7nPGFGpcUY7qWkL629vHdbiuVZ/9zy166l/mKTczYOtzBQC7fePp7appaNPYYYP0vY9M16CgX5NL83T1pCI98MIe/cdf9urnL+3XrpoG/ejmizR4UIbTISPNRaMxGQa9/YG+GjlypL73ve9p/PjxisVi+uUvf6kPfehD2rZtm6ZOndrjY/Ly8rR79+7k1274/y4z4Eu5dlJTh+dr4dQS/Z+rx+mH69/WIxv36cEX3tboIYN0Q/kIp8MDALgYSRAASAMv7jmmp7cdlt80tOoTs5MJEMuw3KB+8alL9Ojf9+v/rt2ldTtr9be3j+uz7xurG8pHaPSQbJ1qCenlfSf0yMZ9qqiqkyT5TUMfuWiEPv+BCzV2WE7yz6ttaNPi/3hJbx9t0pd+U6FHPnGxTNP5D3sAMBD2HWvSn3fUyDCk/7i5XIOCnUts0zT0r9dM1JTSPP3rb1/X3/ee0P946CXdef0UXT2piKoQOKItFNHi/3hJfp+pPy67XH6fqU3vHNe+Y8265dJRvGcDZ7B48eJuX3/3u9/Vww8/rJdffrnXJIhhGCopKbEjvLQwKOjX16+bLMOIt5386pNvqKwwW7MvGOx0aAAAlyIJAgAeF4pE9e0/vSlJ+uS80bp83NAerzNNQ7e9b6wuGztE33hmh16vqtMP17+tH65/W1kBn1q7lMsH/aZuvnSUPvv+sRpRkHXan1Wcl6lHllysj/1ks15466he3ndC83r5uQCQ6n79SqUk6eqJRZo6PL/HaxZNL9WYYYP0uf/cqsqTLfrcf21VSV6m5owt1PiiHI0cnK2S/EyNHTpIw3KDrjglDG9oaAspJ8PfLbHxwlu1evtokyTpH/tPavYFg/W//2urGtvCys8KaPHM4U6FC6SUSCSi3/72t2pubtbcuXN7va6pqUkXXHCBotGoLrr
oIt177729Jkws7e3tam9vT37d0NDQb3F7xb8tnKSDx1u0dmeNvvH0dj37/71PPpK4AIAekAQBAI/71csHtfdokwoHZej/++D497x+2oh8PfMv8/TnHTV69KX9euNwfTIBMmboIF07rUSfuWKMhuac3iqrq1llBbp6YpHW7qzRm9UNJEEAeFJbKKLfbj0kSbplzqgzXjupJE9/WHa5Ht7wjn679ZBqGtr0+4ojp11XOChD00fk65LRg3Xx6ELNKitwfcuSWCymUCSmaD+NGzQMKWCaVCS8hxNN7aptaNekktzT/q7qW0L6wXO79at/HNT7xw/To5+6JLk5+LvEv1lJ+vOOatW3htTYFpYkPfjCHl03vVShSFTrdtboT29U6+3aRv3fj87QnLFDTothd02jnt52WIYhleRl6sJhOZo6PK9PLd9ONndox+F6RWMx+U1T44tzVNTHZGBdS4f2Hm1SdX2bwtGoRhRka0JxjgqyaT2H/rd9+3bNnTtXbW1tysnJ0dNPP60pU6b0eO3EiRP16KOPasaMGaqvr9cPfvADzZs3Tzt37tTIkSN7/RkrV67UPffcM1BPwRNM09D3Pjpdm945rl01jfrd1kP62CVlTocFAHAhBqMDgIedbO7Qld//qxrawvruh6fp43Mu6POfEYpEdeB4s4ryMpWf1bfZHj9Yt1s/+ute3XxpmVZ+ZEaffzbOjPdH+/B3jd48ve2QvvSb1zU8P1N/+7erz/oEans4ok17T2hXTaP2Hm3SkbpWHalvVdXJFkXftToP+AyNL8rV6KHZKs3PUm6mX7mZAeUG/Z2/z4z/Pifol99nymcYMk3JZxqKRGOKRONJinA0qnAkpnA0pkg0qlAkptZQRC3tETW1h9XcHlZzR7jz94n7Wzriv29NDNJt7Tj99++Ouz8EfIYyfKaCAZ8yfKYy/KaC/vivGX5TGT5TmQGfgv74NUG/qcyAqaA/cZ/fl/jalN9nKhaLKRqTorGYYjEl/5xg4haJSuHE30skGlXAZyo7w6esDL+yM3wKR2JqaAupIZE0iMZiyssKKD8roLzMgIIBUw2tIdW3xq9pC0VVnJ+pEQWZGpydoewMv6pOtWjfsfhm/fGmdhXlZmpKaZ7GDhuk0vwsHTjRrL+9fUw7Djdo79EmBfymPjipSFdPKtLEklw1toX1X5sP6o9vHNHeRDXHtBF5uuP6KboskaR4bmeNVjy1XSeaO5J/l8uuGqcvL5yoow1tumzl+uR/r6E5Qc0qK9ALb9Umr/3Kwon64+tHtKumMXnf0Jyg1tx+hYpyMyVJu2oadM8f3tTmfSd6/G83rihHH5xcpAWTi1U+avBp/2/Ut4b0h4rD+uPr1dpy8ORp/36G5gQ198Ihev/4ofrAhGEqysvs9v3Wjoi2HDypl/Ye16a9J7TjSP1pg5gz/Ka+e8M03Xgxm6IDJV3fHzs6OlRZWan6+no9+eST+tnPfqYXX3yx10RIV6FQSJMnT9bNN9+sb3/7271e11MlSFlZWdr9XZ+Nn/1tn77z7Fsqyg1qw1euVHYG530BIF2c7VqEJAgAeNidz+zQf718UJNKch0pD39m22F98TcVumT0YP328/Ns/dnpgPdH+/B3jd7cuGqTXj1wSv+6YIL+z1lU272XtlBEu2sata3ylF49eEpbDpxUbUP7ez8QaSHDb8qQ1B6OdruvI/H1pJJcjR02SGu210iKJyKunVqiH/11ryTpoVsu0qFTLVr5512aOTJf+483qyFRASJJH5o1vFt10pBBGbr50lF6/s1a7a5t1NyxQ/SVayfqH/tO6oHn96gjEpXPNLRgcrFK8jN1pK5Vu2sbdfBES7e4CwdlaM6YQl0wZJAMQ9p5pEGv7D+htlDn8xg7dJCygz61dkS0/3jzaUmRSSW5KivMVobP1P7jzdpT26jwuy4aUZClEQVZ8pmGKk+26HBdqyTpf79/rL62aBJt5gYA749x8+fP14UXXqif/OQnZ3X9jTfeKL/fr1//+tdn/TP4u+5dezii+fe/qKqTrf32fgwASA1n+/5IehwAPGpXTYP++x8HJUl3L57qSH/ccUXxYenWSVUA8JJDp1r06oFTMg31W/uNzIBPM8sKNLOsQJ+6fIxisZgOnWrVntpGHTjRoqONbWpsC6uxLaymtlDy941tITUmqjfOVJER8BnymYYCpim/z5DPjFdO5ATjlQ6DgvFqks5f4/cNSlRCZGf4lZURr77ICviUlRH/NTPgU6bfJ5+vf95rorGYQuGoOiJRdYSjag93/toejqgj3P3+tnBE7aHO77eHo2oLxX+N3x9RKBKVaRgyTUNmYjM81OX6jnBUpmko4DPkN035TUMdkahaOiJq6YiotSMsv89UbqZfeZkB5WUFZBrxigar8qM9HFVeZqIyJMuvoN+nmvo2HalvVX1LSE0dYZXmZerCohyNHJylIYOCOlLfqjePNKjqZIuaOyLKCvh02dhCXTKmUOOLcnWquUPrdtbolf0n1dgeT1hMH5GvT18xWh+YUKRoLKYHX9ijx1+p0q6axmT1xufeP1ZfvmaiMvym6ltD+q+XD2rpY6/Jn1gP3HTJKG05eFJPvXZYkjSxOFf3fni6Xnr7uE40d2jaiDw98omLNbwgSzeUj9D/+NFL2rzvhD7y403J/04fnFSkb90w7bT5YPUtIW18+5jWv1Wrv+4+ppPNHfrzjprT/jtPKM7Rxy4u07XTSjRycHby/taOiLYfrtff3j6mF/cc0/bD9d2em6U0P1OXjxuqK8YN1bwLh3SrFolGY3pw/dv69/Vv6ycb92nO2EJdPan4nP9NAmcSjUa7VW2cSSQS0fbt23XdddcNcFTpI+j36cvXTNTtj1fol5sP6HMfGKug391tJAEA9iIJAgAeFI3G9K0/vqloTFo0rURzLzy9h7cdxg4bJEk61RLSiaZ2DXmPOSIAkEqefzPePuji0YUqflernv5iGIbKCrNVVpj93hcnRKMxRWLxFljRWEymYSjgM2Ua4iS8w2KxWK//DWKxmBpaw8rK8CnDb3b73scuKVM0GlPVqRa1hiKaWJzb7c/5zg3T9a8LJmrdzhq9vO+EPjp7pN43fljy+3f802TFFNMTWw6pIxxV0G/q+hmlGpYbTCZBPlQ+XIOCfv3y05fqH/tP6pZLRykrI76JOK4oR/d/bKa+9cc3ZZqG8rMC+l+XXaD/eUlZj88nPzs+XH3xzOEKR6LacvCUdiYSPaFIVJNL8zSrrEBTh+f1+PisDJ8uHVOoS8cU6l+vmagTTe36x/6TqmsJqT0cUWl+pmaMLFBpfmavf5+maWj5ggk6UteqJ7ce0qsHTpEEQb9YsWKFFi1apFGjRqmxsVGPPfaYNmzYoHXr1kmSlixZohEjRmjlypWSpG9961u67LLLNG7cONXV1en73/++Dh48qNtuu83Jp+E5108v1ff+vEvV9W364+vV+ufZvc9bAQCkH5IgAOAxkWhMX/vdG9r0zgll+E19/brJjsWSneHXiIIsHa5r1d6jTSRBAHjKczvjSZBrprhrY9U0DZky5P
JZ6mnpTEkowzCUn9377C3TNHTBkEG9fn/woAz9z0tH6X9eOuq07wX9Pn3nhun64vwJ+kPFEU0ozlV+VkDvGz9UQwZlqLE9rP8xc7gkadqIfE0bkX/an3HttFJdO630TE+vR36fqcvGDknOKzkXQ3KCum5633+2JJWPKtCTWw9px+H6c/75QFdHjx7VkiVLVF1drfz8fM2YMUPr1q3TggULJEmVlZUyzc5E5qlTp/TZz35WNTU1Gjx4sGbPnq1Nmzad1fwQnD2/z9Qn5l6g/7d2t37x9/366EUjSPwDAJJIggCAhxw43qzvr9utZ7dXy2ca+v4/z+jT6eGBMK4oR4frWvXOsWbNOY8NEABwk1PNHXrlwElJ0sKpJQ5HA5ydoTlBffqKMcmvMwM+PfmFeWrtiHRrR+Ul0xMJne2H689YiQOcrZ///Odn/P6GDRu6ff3AAw/ogQceGMCIYLn5klH64Qtva+eRBm05eEqXjC50OiQAgEuQBAGAFLf14Cm9uOeYXnr7mF6rrJMk+U1D/35z+TmfmuxP44py9OKeY8wFAeAp63cdVSQa0+TSPMeTzcD5GDO09+oSL5hYkquAz1BdS0iHTrXy/yvgYYMHZejD5SP0+KtVWv33AyRBAABJJEEAIEXFYjF9f91u/XjDO8n7TEN63/hh+sKVF55X24n+lByOfowkCADveG5nfMiz21phAegu6PdpQnGudh5p0I7D9SRBAI/7xNwL9PirVXr+zVqdau7Q4EEZTocEAHABkiAAkIJisZj+37rdejiRALl+eqmuGD9UV08qGrDhvOfKSoK8QyUIAI9oC0W08e1jkmiFBaSC6SPytfNIg7YfrtciF1TJAhg4U4fna0ppnt6sbtAf3ziiJXNHOx0SAMAFzPe+BADgNn94/UgyAfLNxVP00Mcv0s2XjnJdAkSSxg2LJ0EO17WquT3scDQAcP5e3ndCbaGohudnanJprtPhAHgP07rMBQHgff88e6Qk6XdbDzkcCQDALUiCAEAK+t1rhyVJ//sDY/Wpy8e8x9XOGjwoQ4WJMvR9x5odjgYAzt/GPcclSe+fMIwhy0AKsIaj70gMRwfgbR+aNVx+09Drh+q1p7bR6XAAAC5AEgQAUkx9S0ib9sY34G66uMzhaM7OhcPiQ1f3nyAJAiD1Wa2w3j9hmMORADgbE0ty5TcNnWoJ6XBdq9PhABhgQ3KCumpSkSSqQQAAcSRBACDFvPBWrcLRmCYW52psotWU2+UE4yOo2kIRhyMBgPNzpK5Ve482yTSkyy8c6nQ4AM5CZiA+HF2KV4MA8L6PXhRvifVMxWFFo1SAAUC6IwkCACnmzztqJEkLp6XOMF6/L/52E47wAQRAatu4J14FUj5qsPKzAw5HA+BsjUlUpR6pa3M4EgB2uGrSMOUG/aptaNfWylNOhwMAcBhJEABIIU3t4WQblkUplAQJ+OI988PRqMORAMD5SbbCGk8rLCCVBK0DGaxFgLQQ9Pu0YGqxJOnZN6odjgYA4DSSIACQQv6666g6wlGNHpKtSSW5Todz1nxm/O0mRCUIgBQWjkT10tvWUHRaYQGpxJ84kMFaBEgf/zSjVJK0Znu1IrTEAoC0RhIEAFLIlgMnJUkfnFwswzAcjubsBcxEJUiE05cAUtf2w/VqaAsrPyugGSMLnA4HQB8EfNaBDNYiQLq4Ytww5Wb6dbSxPfk5CgCQnkiCAEAKqTrVKkm6MEUGolv8yXZYnMACkLpe3hffQLlsbKF8ZuokogGQBAHSUYbf1MKp8RbCz26nJRYApDOSIACQQqpOtkiSygqzHI6kbxiMDsALXt53QpJ02dghDkcCoK8y/LTmBNLR9cmWWDW0xAKANEYSBABSRCwW06FEJUjZ4GyHo+mbZDsshpECSFGhSDTZSoMkCJB6Aomq1I4waxEgnVx+4VDlZwV0vKldr+ynJRYApCuSIACQIo43dag1FJFhSMMLUrMShNOXAFLV9sP1au6IqCA7oInFuU6HA6CP/GaiKpUDGUBaibfEKpYkPbv9iMPRAACcQhIEAFJE1al4K6zSvMxkS4dUkZwJQh9uACnKaoU1Z0yhTOaBACkn2Q4rzIEMIN1cP2O4JGntjho+jwBAmkqtXTQASGPWPJCRhanVCkuSAsnTl2w8AEhNnUPRaYUFpCKrHRaD0YH0M+/CISrIDuh4UwctsQAgTZEEAYAUkarzQCTJZ7LxACB1dZ0HMvdCkiBAKgokWnN2sBYB0k7AZ+raqSWSpD9tr3Y4GgCAE0iCAECKsCpBygpTax6I1Hn6MsxMEAAp6I1D9WrpiGhwdkATipgHAqQiaz4ZaxEgPV0/o1QSLbEAIF2RBAGAFGHNBBmZgpUgyY0H2mEBSEGd80CGMA8ESFEZtMMC0trcsfGWWCebO7Tl4CmnwwEA2IwkCACkiKqTVjus1KsE8Sc2DcNRNh4ApB4rCXLZ2EKHIwFwrmiHBaQ3v8/UBycVS5LW7axxOBoAgN1IggBACohEYzpSl0iCpOJgdFpQAEhR8Xkg8ROjlzEPBEhZ1lqEShAgfS2cGk+CPLezVrEYn0sAIJ2QBAGAFFBd36pwNKaAz1BxXqbT4fSZnxYUAFLUG4fq1BpiHgiQ6phPBuB944cpM2DqcF2rdh5pcDocAICNSIIAQAqwWmGNKMiSLwX70QdMZoIASE0v7zspSbpsLPNAgFRGJQiArAyfPjBhmCTpOVpiAUBaIQkCACnAGoqeiq2wJCUTN2w8AEg1nfNAaIUFpLLOmSAcyADS2cKpJZKkdTtrHY4EAGAnkiAAkAIOnYwnQUYOTs0kiJ8WFABSUEe4yzwQkiBASqMSBIAkfXBSsXymod21jTpwvNnpcAAANiEJAgApoLq+TZI0oiD15oFInRsPEdphAUgh2w/H54EUDsrQ+KIcp8MBcB46Z4KQBAHSWX52QJeNLZQkPfcmLbEAIF2QBAGAFNDcEZYk5WYGHI7k3PitdlhRNh4ApI5X9serQC4ZPZh5IECK66wE4UAGkO5oiQUA6YckCACkgJaOiKT4ML9UZG080A4LQCrZejA+FP3iCwodjgTA+eqcCcKBDCDdLZhSLEl6rfKUjja2ORwNAMAOJEEAIAVYSZDsFE2CWDNB6MMNIFXEYjFtPRivBJk9erDD0QA4Xxl+1iIA4krzszRzZL5iMemFN486HQ4AwAYkQQAgBbSmehLETFSCMBMEQIrYd7xZp1pCCvpNTRue73Q4AM4TVakAurom2RKLuSAAkA5IggBACmgNJdphBfwOR3Ju/AwjBZBith6IV4HMHFmgDD9LZiDV+WmHBaALay7IpneOq6Et5HA0AICBxic6AEgBqV8JYrWg4PQlgNRgtcK66AJaYQFeEOjSmjMWYz0CpLtxRTkaO2yQQpGYNuw+5nQ4AIABRhIEAFJAS0dYUuoPRo/QDgtAitiSHIpOEgTwgozEWiQWYz0CIG4hLbEAIG2QBAGAFGANRs8KpGYSJNkOK0oLCgDud6q5Q+8ca5ZEJQjgFdaBDIkZZQDirCTIhl1H1ZZoPwwA8CaSIADgc
pFoTO3hePIgddthxd9uaIeF/rBx40YtXrxYw4cPl2EYeuaZZ876sX//+9/l9/s1a9asAYsPqe+1yngrrLHDBqlwUIbD0QDoD9aBDIm5IADiZozIV3FeUM0dEW1657jT4QAABhBJEABwudYup5KyM1JzMHqAwejoR83NzZo5c6YeeuihPj2urq5OS5Ys0Qc/+MEBigxesSUxD4RWWIB3BMzOj76hMOsRAJJpGrpmSrwa5LmdtQ5HAwAYSCRBAMDlrHkghiFlBlLzZdufaEERov0E+sGiRYv0ne98Rx/+8If79LjPf/7zuuWWWzR37twBigxesfWAlQQpdDgSAP3FNA35TWs4OusRAHFWS6zn36xlXhAAeFhq7qYBQBpp7TIPxDCM97janaxNBypB4JRf/OIX2rdvn+6+++6zur69vV0NDQ3dbkgPHeGoXj9UJ0maPZpKEMBLrLkgIdYjABLmjC1UXqZfJ5o7tDVRCQoA8B6SIADgclY7rFSdByJ1JkGiMSnKCSvY7O2339bXvvY1/epXv5Lff3Yt5VauXKn8/PzkraysbICjhFvsPFKv9nBUg7MDGjt0kNPhAOhH1lwQkiAALAGfqQ9OLpYkPbezxuFoAAADhSQIALhcS6ISJDOQwkkQX+fbTZgkCGwUiUR0yy236J577tGECRPO+nErVqxQfX198lZVVTWAUcJNrFOgsy8YnLLVdwB6lpGsBGEtAqDTNVMSSZA3axWL8foAAF6UmhN2ASCNWO2wUrkSxBqMLknhaFQZ5OBhk8bGRm3ZskXbtm3TsmXLJEnRaFSxWEx+v1/PPfecrr766tMeFwwGFQwG7Q4XLtCZBGEeCOA1tMMC0JMPTBymoN9U5ckW7a5t1KSSPKdDAgD0M5IgAOByViVIVkbqvmT7zc6kB6cvYae8vDxt3769230//vGP9Ze//EVPPvmkxowZ41BkcKNYLKYtiSTIxcwDATwn4I8fyuggCQKgi+wMv943fqheeOuo1u2oJQkCAB7U56O4Gzdu1OLFizV8+HAZhqFnnnnmjNc/9dRTWrBggYYNG6a8vDzNnTtX69atO9d4ASDttHSEJUnZKdwOq1slCBsPOE9NTU2qqKhQRUWFJGn//v2qqKhQZWWlpHgrqyVLlkiSTNPUtGnTut2KioqUmZmpadOmadAgZj6g06FTrTrW2K6Az9D0EflOhwOgnwUShzLCHMgA8C7XTCmRJD33JnNBAMCL+pwEaW5u1syZM/XQQw+d1fUbN27UggULtGbNGm3dulVXXXWVFi9erG3btvU5WABIR15oh2UYhnyJ4ejMBMH52rJli8rLy1VeXi5JWr58ucrLy3XXXXdJkqqrq5MJEaAvthw8KUmaNiI/pecwAegZ7bAA9OaDk4tkGtLOIw06dKrF6XAAAP2sz71VFi1apEWLFp319Q8++GC3r++99179/ve/1x//+Mfk5gUAoHed7bBSe0POZxqKRGNsPOC8XXnllWccWrl69eozPv6b3/ymvvnNb/ZvUPCE1w7WSZJmj6IVFuBFtMMC0JshOUFdPLpQr+w/qed21urTV9AyFQC8xPbJtNFoVI2NjSosZNgkAJyN1lDqV4JIUsCqBKEFBQCXev1QnSRp1qgCR+MAMDCSlSBhkiAATnfNlGJJtMQCAC+yPQnygx/8QE1NTfrYxz7W6zXt7e1qaGjodgOAdNXZDit1B6NLkj+x8UA7LABu1BaK6K3q+JpzVlmBs8EAGBAB1iIAzmDh1PhckFf2n9Sp5g6HowEA9CdbkyCPPfaY7rnnHj3xxBMqKirq9bqVK1cqPz8/eSsrK7MxSgBwF6sdVqr3p7eGo4ejnL4E4D5vVjcoFIlpaE6GRhRkOR0OgAFgrUVozQmgJ2WF2ZpcmqdoTHrhrVqnwwEA9CPbkiCPP/64brvtNj3xxBOaP3/+Ga9dsWKF6uvrk7eqqiqbogQA92kNhSWlfjssv5k4fUk7LAAuVFFZJyleBWIYhrPBABgQViVIB+2wAPSisyUWSRAA8BJbkiC//vWvdeutt+rXv/61rr/++ve8PhgMKi8vr9sNANJVS4c3ZoL4OX0JwMUqquokSTNHFjgaB4CBk5wJwoEMAL2wWmL97e1jybbEAIDU1+ckSFNTkyoqKlRRUSFJ2r9/vyoqKlRZWSkpXsWxZMmS5PWPPfaYlixZovvuu09z5sxRTU2NampqVF9f3z/PAAA8zkqCZKV4EoQ+3ADcjKHogPdlJNciHMgA0LPJpbkaOThLbaGoNr59zOlwAAD9pM9JkC1btqi8vFzl5eWSpOXLl6u8vFx33XWXJKm6ujqZEJGkRx55ROFwWEuXLlVpaWnydvvtt/fTUwAAb2v1SiWISSUIAHc62dyhgydaJEkzqAQBPMuqSqUdFoDeGIaha6bEq0HW7axxOBoAQH/x9/UBV155pWKx3k/xrl69utvXGzZs6OuPAAB00RpKVIIE+vyS7Sq+RBKEmSAA3MaqAhk7bJDyswLOBgNgwNAOC8DZuGZqsR79+36tf+uowpGo/D7bxukCAAYIr+QA4HJemQlibTxEaIcFwGW6DkUH4F2dSRAqQQD07uILBqtwUIbqW0N65cBJp8MBAPQDkiAA4HKtHWFJqZ8EYTA6ALeyhqKTBAG8LcNnVaWyFgHQO7/P1AcnFUmSnttZ63A0AID+QBIEAFzOqgTJDKR2EiRgMhgdgPvEYrHOoegkQQBPs1radNAOC8B7uGZqfC7IcztrztgSHgCQGkiCAIDLeWYwOpUgAFzo4IkW1bWElOE3Nakkz+lwAAwg2mEBOFvvGz9UWQGfjtS3aeeRBqfDAQCcJ5IgAOBisVhMLSErCZLag9Gt05cMRgfgJlYVyNThecrwszQGvCyDAxkAzlJmwKcPTBgmSVq3s8bhaAAA54tPegDgYh2RaHKQeFaKV4IEzEQf7igbDwDcYxtD0YG00VkJwoEMAO/tmqnFkpgLAgBeQBIEAFzMaoUlpX47LJ9pnb5k4wGAezAPBEgfAT/tsACcvQ9OKpbPNLS7tlEHjjc7HQ4A4DyQBAEAF2tNtMIK+Izk6cVUZcUfYTA6AJfoCEeTfb5JggDe5zdphwXg7OVnB3TZ2EJJ0nNv0hILAFJZau+oAYDHtSQqQbICqV0FIjEYHYD7vFXdoI5wVIOzAxpVmO10OAAGWAaVIAD66JopJZJoiQUAqY4kCAC4mNUOK9WHokuS30wMRqcSBIBLWK2wZpYVyDAMZ4MBMOCYCQKgrxZMic8F2Vp5Ssca2x2OBgBwrkiCAICLJStBUnweiBRv6SVJYU5fAnCJisRQ9JkjCxyNA4A9OpMgrEUAnJ3hBVmaMTJfsZi0/i2qQQAgVZEEAQAXa+kIS/JaOyxOXwJwhwprKPqoAkfjAGCPAK05AZyDaxLVIOt2MhcEAFIVSRAAcLHOdlgeSIIk22Gx8QDAefUtIe071ixJmkUlCJAWkpUgYQ5kADh710yNzwX5+94TamoPOxwNAOBckAQBABfzUjssv2m1w2LjAYDzrHkgFwzJ1uBBGc4GA8AWVhKkg0oQ
AH0wvihHY4YOUkckqhd3H3M6HADAOSAJAgAu1hryUCWIj8HoANzj9ao6SdKssgJH4wBgn+R8MqpSAfSBYRi0xAKAFEcSBABcrLMdlt/hSM4fg9EBuElFIgnCUHQgfdAOC8C5umZqPAny111H1RHm8wwApBqSIADgYt5qh5XYeKASBIDDYrFYsh0WQ9GB9JFMgnAgA0AflZcN1tCcoBrbw3p53wmnwwEA9BFJEABwsZZQfPBeVsADSRAqQQC4xKFTrTre1KGAz9CU0jynwwFgE6sqlZkgAPrKNA0toCUWAKQskiAA4GKd7bBSPwnS2Q6LShAAzrJaYU0uzVOmB5LMAM6OVQnCWgTAubBaYj3/Zq2iVLcDQEohCQIALkY7LADofwxFB9IT7bAAnI95Fw5RTtCvo43tybaaAIDUQBIEAFwsWQnigZPKtMMC4BYMRQfSE+2wAJyPoN+nKycOkySt21nrcDQAgL4gCQIALtYa8l4lSJhKEAAOCkWi2nGkXhJD0YF0QyUIgPN1zdQSSdJzbzIXBABSCUkQAHAx60O69aE9lVEJAsANdtc0qi0UVW6mX2OGDHI6HAA2yvAzEwTA+bly4jAFfIb2HWvW3qNNTocDADhLqb+rBgAeZn1I93sgCZIcjE4lCAAHVXSZB2KahrPBALBVcjB6NMZQY5yzhx9+WDNmzFBeXp7y8vI0d+5c/fnPfz7jY377299q0qRJyszM1PTp07VmzRqbokV/y8sMaN6FQyVJ63ZSDQIAqSL1d9UAwMMiiQ/oAQ9s1CUHo1MJAsBBrzMPBEhbVlWqJIWirEdwbkaOHKnvfe972rp1q7Zs2aKrr75aH/rQh7Rz584er9+0aZNuvvlmfeYzn9G2bdt0ww036IYbbtCOHTtsjhz95ZqpxZKk50iCAEDKIAkCAC5mfUD3eSAJkqwEoQUFAAd1rQQBkF4yulTWhliP4BwtXrxY1113ncaPH68JEybou9/9rnJycvTyyy/3eP0Pf/hDXXvttfrKV76iyZMn69vf/rYuuugi/ehHP7I5cvSXBVOKZRjS64fqdaSu1elwAABngSQIALiYlTDwxEwQqxKE9hMAHNLYFtLeY/H+3TNJggBpp+t6ihll6A+RSESPP/64mpubNXfu3B6v2bx5s+bPn9/tvoULF2rz5s12hIgBUJSbqYsvGCyJllgAkCpSf1cNADzMmp/hhUoQH4PRAThs+6F6xWLSiIIsDcsNOh0OAJv5TEPWkqqD9QjOw/bt25WTk6NgMKjPf/7zevrppzVlypQer62pqVFxcXG3+4qLi1VTc+bN8/b2djU0NHS7wT0WTi2RJK3dQRIEAFIBSRAAcDErYdC1h3WqCiQqQSJUggBwyDarFdaoAkfjAOAcv8+aUcZ6BOdu4sSJqqio0D/+8Q994Qtf0Cc/+Um9+eab/fozVq5cqfz8/OStrKysX/98nJ9rp8WTIK8eOKnjTe0ORwMAeC8kQQDAxZKD0b3QDiuRyGEwOgCnWEPRZzEUHUhb1lyQUJj1CM5dRkaGxo0bp9mzZ2vlypWaOXOmfvjDH/Z4bUlJiWpra7vdV1tbq5KSkjP+jBUrVqi+vj55q6qq6rf4cf5GDs7W9BH5isak59+sfe8HAAAclfq7agDgYZ4cjE4lCAAHxGKxzqHoVIIAaatzPUISBP0nGo2qvb3naoC5c+dq/fr13e57/vnne50hYgkGg8rLy+t2g7tY1SC0xAIA9yMJAgAulhyMbqb+y7U1GD1M+wkADqhpaNPRxnb5TEPThuc7HQ4Ah1jVtR1h1iM4NytWrNDGjRt14MABbd++XStWrNCGDRv08Y9/XJK0ZMkSrVixInn97bffrrVr1+q+++7Trl279M1vflNbtmzRsmXLnHoK6CfWXJBN7xxXfWvI4WgAAGeS+rtqAOBhXhqMTjssAE6qqKyTJE0szlVWhs/ZYAA4JpCcCcJ6BOfm6NGjWrJkiSZOnKgPfvCDevXVV7Vu3TotWLBAklRZWanq6urk9fPmzdNjjz2mRx55RDNnztSTTz6pZ555RtOmTXPqKaCfjCvK0fiiHIUiMf1lFy2xAMDN/E4HAADonTUYPeCFweiJTQfaYQFwQsWhOkm0wgLSXYBDGThPP//5z8/4/Q0bNpx234033qgbb7xxgCKCk66dVqK3/7JXa3fU6MPlI50OBwDQCypBAMDFrISB3wuD0U02HQA4x6oEYSg6kN46K0E4lAHg/FktsV7cc0wtHWGHowEA9Cb1d9UAwMOs+Rl+L7TDSswEiVAJAsBmkWhM2w/XS6ISBEh3tMMC0J+mDs9TWWGW2kJRvbj7mNPhAAB6QRIEAFwsHI1/QPd7oB2W9RwYjA7Abm8fbVRLR0SDMny6cFiO0+EAcBDtsAD0J8MwdG2iGmTtzhqHowEA9IYkCAC4mCcHo0fZdABgL6sV1oyRBZ54PQVw7qgEAdDfrp0WT4L85a2jag9HHI4GANATkiAA4FKRaEyxRNFEwEz9l2vrOcRitMQCYK/XGYoOIIGZIAD6W3nZYBXlBtXYHtamvSecDgcA0IPU31UDAI8Kd6mY8FI7LInTlwDstS1RCTKToehA2gv4qQQB0L9M00gOSF+7g5ZYAOBGJEEAwKW6zs7we6ESxNf5HMJUggCwSXN7WHtqGyVJ5VSCAGkvg5kgAAaA1RLr+bdqFeb1BQBcJ/V31QDAo7olQbxQCdKlDz8fDADYZcfhekVjUkleporzMp0OB4DDrIMlHbTDAtCPLh1TqILsgE42d+iVAyedDgcA8C4kQQDApbq1w/LAIN+uw4ipBAFgl4qqOknSrLICR+MA4A7JdlhhDmQA6D8Bn6kFk4slSetoiQUArkMSBABcykoU+ExDhpH6SRDDMJLJnDCnLwHYxBqKPpMkCABJgUR1bdfDJgDQH6yWWOt21irKoS8AcBWSIADgUlavai9UgVj89OEGYLOKxFB0KkEASFKGzxqMzgYlgP51+bihGpThU01DmyoShzAAAO5AEgQAXCqSOD3kpSRIINGHm3ZYAOxwtKFNR+rbZBrSjJH5TocDwAWsAxkdtMMC0M8yAz5dTUssAHAlkiAA4FLWCUW/zzsv1dbGA4PRAdjBmgcyvihXg4J+Z4MB4Aq+RIvRWIwDGQD637VT4y2x1u6s4XUGAFzEOztrAOAxXqwE8dOCAoCNGIoO4N2sOWsUpQIYCFdOHKag39TBEy16q7rR6XAAAAkkQQDApZIzQXzeSYIETIaRArAPQ9EBvJuZTIKQBQHQ/wYF/Xr/hGGS4tUgAAB3IAkCAC4VTlaCeOel2me1w+L4Jc7Dxo0btXjxYg0fPlyGYeiZZ5454/VPPfWUFixYoGHDhikvL09z587VunXr7AkWjolGY3qjql4SlSAAOlkFtixFAAwUqyUWc0EAwD28s7MGAB4TiXqxEiQxGJ12WDgPzc3Nmjlzph566KGzun7jxo1asGCB1qxZo61bt+qqq67S4sWLtW3btgGOFE5651iTGtvDygr4NKE4x+lwALiEaTITBMDAmj+5WH7T0O7aRr1zrMn
pcAAAkpgQCQAulRyM7qmZIAxGx/lbtGiRFi1adNbXP/jgg92+vvfee/X73/9ef/zjH1VeXt7P0cEtrHkg00fkJ+cRAYCRrAQhCQJgYORnBzRv3FBt3HNMf95erWVXj3c6JABIe3wiBACXiniwHZb1XEL0oICDotGoGhsbVVhY2Os17e3tamho6HZDakkORR9V4GgcANzFZDA6ABtcNy3eEmvNdlpiAYAbeGdnDQA8xpOD0akEgQv84Ac/UFNTkz72sY/1es3KlSuVn5+fvJWVldkYIfpDcij6yAJH4wDgLiaVIABscM3UEvlMQ29WN+jA8WanwwGAtEcSBABcypqb4aU2LtZzCTETBA557LHHdM899+iJJ55QUVFRr9etWLFC9fX1yVtVVZWNUeJ8tYUi2lXdKIlKEADdWZUg5EAADKTCQRmaO3aIJGnNjmqHowEAeGdnDQA8Jhz14EyQxHMJR6kEgf0ef/xx3XbbbXriiSc0f/78M14bDAaVl5fX7YbUseNwvcLRmIbmBDU8P9PpcAC4iJFsh0UWBMDAWjQ93hLrz7TEAgDHkQQBAJeyEgWeSoIk2mFFaMQNm/3617/Wrbfeql//+te6/vrrnQ4HAyw5D6SsILnhCQAS7bAA2Gfh1BKZhrT9cL2qTrY4HQ4ApDWSIADgUlaiIOCldlgm7bBw/pqamlRRUaGKigpJ0v79+1VRUaHKykpJ8VZWS5YsSV7/2GOPacmSJbrvvvs0Z84c1dTUqKamRvX19U6EDxt0JkHynQ0EgOswGB2AXYbmBDVnTLwl1p9piQUAjvLOzhoAeIyVKPB5qBKEwejoD1u2bFF5ebnKy8slScuXL1d5ebnuuusuSVJ1dXUyISJJjzzyiMLhsJYuXarS0tLk7fbbb3ckfgw8ayj6rLLBzgYCwHWsZVWMShAANrhuRqkk6VlaYgGAo/xOBwAA6JmVKLASB16QrATh+CXOw5VXXnnGzavVq1d3+3rDhg0DGxBc5URTu6pOtkqSZlAJAuBdkjNBOI8BwAYLpxbrrt/v0OtVdTp0qkUjB2c7HRIApCUqQQDApazB6F6qBPFTCQJggFmtsC4cNkh5mQFngwHgOiaD0QHYqCg3U5eMLpQkrd1BNQgAOIUkCAC4lJUo8HtqJoiVBGHjAcDA6JwHQissAKezzpZESIIAsMn10+MtsdZsZy4IADjFOztrAOAxViVIwEOVIGbiucTExgOAgZFMgowqcDQOAO5kVYKQAwFgl2unlcgwpNcq61Rd3+p0OACQlkiCAIBLdbbD8s5LtbXxQDcsAAMhGo0lkyDlZQWOxgLAnRJLEdphAbBNcV6mLr4gXqFKSywAcIZ3dtYAwGO8OBjdRx9uAANo3/FmNbaFFfSbmliS63Q4AFyocyaIw4EASCuLptESCwCcRBIEAFzKi4PRraKWKDsPAAaAVQUyfUS+Ah6apwSg/5hUggBwwKLpJZKkLQdPqbahzeFoACD98OkQAFzKGh7upY08Tl8CGEgVVackSbNohQWgF9bhkhhJEAA2Ks3P0kWjChSLSet20hILAOzmnZ01APAYqxLE76VKENphARhADEUH8F4May3CfDIANrtuerwl1rNv0BILAOxGEgQAXMqaCeLz0EwQWlAAGChtoYh2VTdKohIEQO84kAHAKddOi7fEeuXASR1rbHc4GgBILyRBAMClrEqQgOmdl2rTZOMBwMDYcbhe4WhMQ3OCGlGQ5XQ4AFyq80CGs3EASD8jB2drZlm8Jdafd1ANAgB28s7OGgB4TDjRp8FTg9ETpy8jtKAA0M+SrbDKCpLtbgDg3ay1CDNBADhh8Yx4S6w/vU4SBADsRBIEAFyqczC6dzbzGEYKYKBsSyRBypkHAuAMDFpzAnDQ9YkkyKsHT6q6vtXhaAAgfZAEAQCXSg5G93nnpZqNBwAD5fUulSAA0JvOmSAOBwIgLZXmZ+nS0YWKxRiQDgB28s7OGgB4jDUY3U87LAA4o+NN7Tp0qlWGIc0Yme90OABczBq1xoEMAE5ZPDNeDfJHkiAAYBuSIADgUiGrEsRDSRCfwWB0AP2vorJOkjRuWI5yMwPOBgPA1TpngjgcCIC0de20UplGvIq18kSL0+EAQFogCQIALhVJzATxeagdlkk7LAADoIJWWADOksGBDAAOG5Yb1LwLh0qS/vjGEYejAYD04J2dNQDwmHA03jMq4KFKEDYeAAyEZBKEoegA3gMHMgC4QbIl1uskQQDADiRBAMClvDgY3WcyjBRA/4pGYwxFB3DWGIwOwA0WTi1RwGdoV02j9h5tdDocAPA87+ysAYDHhCPemwmSPH3JzgOAfrLveJMa28PKCvg0sTjX6XAAuJy1FolRCQLAQQXZGXr/+GGSpD++zoB0ABhoJEEAwKVCkXg7LL/PQ0kQk3ZYAPrXtsRQ9Okj8j1VOQdgYBhUggBwicUzh0uKzwUhMQsAA4tPigDgUpGoFytB2HgA0L+YBwKgL0zmkwFwiflTihX0m9p3rFlvVjc4HQ4AeBpJEABwqVAyCeKdl2raYQHob1YSZObIAkfjAJAaOgejOxsHAOQE/bp6UpEkWmIBwEDzzs4aAHhMJOrBdlicvgTQj1o7ItpVEx8mSiUIgLNhrUVoPQPADZItsV6nJRYADCSSIADgUp2D0b3zUm1tPERY3wPoBzuO1CsSjWlYblDD8zOdDgdACjCSlSAsRgA476qJRRqU4dPhulZtS1S3AgD6n3d21gDAY7w4GN3HYHQA/agiMRR9VllBctgxAJxJsio16nAgACApK8OnBVOKJcWrQQAAA4MkCAC4lDcHo8d/pdQbQH9IDkUvK3A0DgCpg9acANzGaon17BvVyc+AAID+RRIEAFwqZLXD8nnnpdo6qc3iHkB/sJIg5SRBAJylzgMZzsYBAJb3jR+mvEy/jja269UDJ50OBwA8yTs7awDgMV6sBOlsh+VwIABS3tHGNh2ua5VhSNNH5jsdDoAUYVAJAsBlMvymFk0rlST9vuKww9EAgDeRBAEAlwpHvTcTxMrnRMmCADhP1jyQ8UU5ys0MOBsMgJRhMhgdgAt9qDzeEutPb1SrLRRxOBoA8B6SIADgUsl2WKZ3Xqrpww2gvzAPBMC5MBNZEJYiANzksjFDNDw/U41tYa1/66jT4QCA53hnZw0APMaL7bA6kyAOBwIg5XUmQQY7GwiAlEIlCAA3Mk1DH75ohCTpqdcOORwNAHgPSRAAcKlQxIPtsBLvOmw8ADgf0WhMbxyql0QlCIC+MTiQAcClPlw+UpK0Yc8xHW9qdzgaAPAWkiAA4FLhxKfzgM87L9W0wwLQH9451qSm9rCyAj5NKM5xOhwAKYS1CAC3GleUo5llBYpEY/pDxRGnwwEAT/HOzhoAeEgsFku2w/J5sB1WhOOXAM7DtkQrrOkj8+X3UKIYwMCzllXkQAC40UetlljbaIkFAP2JT40A4ELhLkmCgCcHozscCICUZs0DKacVFoA+ohIEgJv904zhCvgM7TjcoN01jU6HAwCe4Z2dNQDwkK6VEl6aCWId2I6x8QDgPF
RU1kliHgiAvjMYjA7AxQoHZeiqiUWSqAYBgP5EEgQAXMgaii55qx2WQTssAOeptSOi3bXxk5GzRhU4GwyAlENVKgC3+8hF8QHpz2w7zOcmAOgnJEEAwIXCkS7tsDzU797HxgOA87T9cL0i0ZiK84Iqzc9yOhwAKcZKglCVCsCtrpo0TAXZAdU2tGvTO8edDgcAPKHPO2sbN27U4sWLNXz4cBmGoWeeeeY9H7NhwwZddNFFCgaDGjdunFavXn0OoQJA+ug6E8RDhSAyaYcF4DxVVJ2SRCssAOfGTLbDcjYOAOhN0O/T4hnDJUlPbqUlFgD0hz4nQZqbmzVz5kw99NBDZ3X9/v37df311+uqq65SRUWFvvjFL+q2227TunXr+hwsAKSLcDTeDivgM5ItpLwg2Q6LJAiAc2QNRZ9VNtjZQACkJIPB6ABSwD/PjrfE+vOOGp1q7nA4GgBIff6+PmDRokVatGjRWV+/atUqjRkzRvfdd58kafLkyXrppZf0wAMPaOHChX398QCQFqx2WH7TO62wpC7tsKLvcSEA9IKh6ADOR7IShFIQAC42Y2S+pg7P084jDfrda4d02/vGOh0SAKS0Ad9d27x5s+bPn9/tvoULF2rz5s29Pqa9vV0NDQ3dbgCQTqx2WH4v9cJS12GkbDwA6LujDW06Ut8m04hvDgBAX3XOBHE4EAA4A8MwdMucUZKkx16ppJ0wAJynAU+C1NTUqLi4uNt9xcXFamhoUGtra4+PWblypfLz85O3srKygQ4TAFwlHImXSvh9HkuCJN51SIIAOBfbEq2wJhTnalCwzwXNAMCBDAAp40OzRmhQhk/7jjXrH/tPOh0OAKQ0V/ZZWbFiherr65O3qqoqp0MCAFtZlSA+j7XD6tx4cDgQACmpcx5IgaNxAEhdBoPRAaSInKBf/2PWCEnSf/+j0uFoACC1DfjuWklJiWpra7vdV1tbq7y8PGVlZfX4mGAwqLy8vG43AEgn1kyQgNcqQZIzQdh5ANB3zAMBcL5Mk0oQAKnj44mWWGt3VOtEU7vD0QBA6hrwJMjcuXO1fv36bvc9//zzmjt37kD/aABIWeGoN9th+WiHBeAcRaIxvXGoTpI0a1SBo7EASF3WuDWWIgBSwbQR+ZoxMl+hSExPbj3kdDgAkLL6nARpampSRUWFKioqJEn79+9XRUWFKivjpXkrVqzQkiVLktd//vOf1759+/TVr35Vu3bt0o9//GM98cQT+tKXvtQ/zwAAPKhzMLq32mEZiUqQCDsPAPpo79EmNXdElJ3h0/iiXKfDAZCimAkCINXccmm8GuTXr1RSUQ8A56jPu2tbtmxReXm5ysvLJUnLly9XeXm57rrrLklSdXV1MiEiSWPGjNGzzz6r559/XjNnztR9992nn/3sZ1q4cGE/PQUA8J6QNRjd9FglSLIdlsOBAEg5FVWnJEnTR+TL57HXRgD26ZwJwkYigNSweOZw5QT9OnCiRZv3nXA6HABISf6+PuDKK69U7AwLxtWrV/f4mG3btvX1RwFA2ookB6N7a6PPOn15pvcRAOhJcig6rbAAnIfOShCHAwGAszQo6NeHy0fov14+qMf+UanLxw11OiQASDne6rMCAB7RORjdWy/T1ulL2mEB6KvXDtZJki4aNdjZQACkNCsJInEoA0DquCUxIH3dzhpV17c6HA0ApB5v7a4BgEckZ4J4bjA6py8B9F1jW0h7jjZKksqpBAFwHroW2bIeAZAqJpfm6bKxhQpHY/rlpoNOhwMAKYckCAC4UNijM0GSLSjYdQDQB28cqlcsJo0cnKWi3EynwwGQwowulSDMBQGQSj5zxVhJ8QHpLR1hh6MBgNRCEgQAXChkVYKY3nqZNhlGCuAcbKuMD0UvpxUWgPPUvRKE9Qj6buXKlbrkkkuUm5uroqIi3XDDDdq9e/cZH7N69WoZhtHtlplJUh99c/WkIl0wJFv1rSH9bushp8MBgJTird01APCISDRRCeKxdlgm7bAAnIPXKuskSRfRCgvAeeo+E8TBQJCyXnzxRS1dulQvv/yynn/+eYVCIV1zzTVqbm4+4+Py8vJUXV2dvB08SEsj9I3PNHTrvNGSpEf/fkARPlQBwFnzOx0AAOB0oYhVCeKxJAjtsAD0USwWoxIEQL8xaYeF87R27dpuX69evVpFRUXaunWr3v/+9/f6OMMwVFJSMtDhweNuvLhMD7zwtvYfb9baHTW6fkap0yEBQEqgEgQAXCiSHIzurZdpn5UEYdMBwFk6eKJFp1pCyvCbmlKa53Q4AFKcwWB09LP6+npJUmFh4Rmva2pq0gUXXKCysjJ96EMf0s6dO+0IDx4zKOjXpxLVID/6617F+FwFAGfFW7trAOARXh2MbiRngjgbB1Lbxo0btXjxYg0fPlyGYeiZZ555z8ds2LBBF110kYLBoMaNG6fVq1cPeJzoH68lqkCmDc9Thp+lK4DzQyUI+lM0GtUXv/hFXX755Zo2bVqv102cOFGPPvqofv/73+tXv/qVotGo5s2bp0OHep/r0N7eroaGhm43QJI+NW+0sjN8equ6QX/dfdTpcAAgJfBJEgBcKNkOy2OVINZMkAibDjgPzc3Nmjlzph566KGzun7//v26/vrrddVVV6miokJf/OIXddttt2ndunUDHCn6w7bkPBBaYQE4f13Pl8SizsUBb1i6dKl27Nihxx9//IzXzZ07V0uWLNGsWbP0gQ98QE899ZSGDRumn/zkJ70+ZuXKlcrPz0/eysrK+jt8pKjBgzL0vy67QJL0o79QDQIAZ8Nbu2sA4BHJdlgeqwSx2mGxUMf5WLRokb7zne/owx/+8Fldv2rVKo0ZM0b33XefJk+erGXLlumf//mf9cADDwxwpOgP26qYBwKg/1AJgv6ybNky/elPf9Jf//pXjRw5sk+PDQQCKi8v1969e3u9ZsWKFaqvr0/eqqqqzjdkeMhtV4xRht/Ua5V1VIMg5b164KSe3naIfQIMKJIgAOBCoag322FZTydCPyzYaPPmzZo/f363+xYuXKjNmzc7FBHOVktHWG9VN0qSykcVOBsMAE/oOhOEylSci1gspmXLlunpp5/WX/7yF40ZM6bPf0YkEtH27dtVWtr7UOtgMKi8vLxuN8BSlJepWxOzQb735118vkLKau2I6NO/eFVf+s3r+n3FEafDgYf5nQ4AAHC6iMfbYbFGh51qampUXFzc7b7i4mI1NDSotbVVWVlZpz2mvb1d7e3tya/pw+2M7YfqFYnGVJKXqeEFp/93AoC+MgxDhiHFYlSC4NwsXbpUjz32mH7/+98rNzdXNTU1kqT8/PzkmmLJkiUaMWKEVq5cKUn61re+pcsuu0zjxo1TXV2dvv/97+vgwYO67bbbHHseSH3/cuU4Pf5qlfbUNul3Ww/pY5fQMg3Oi8ViOtbYriE5QfnedajzWGO7dtc0qrq+VRNLcjVjZIFeeKtWje1hSdK3//SmPjBhmAYPykg+5kRTu365+aBefueEVlw3qcfq8K0HT+lXLx9UOBrTyMFZmlicq9kXDNbIwVkyjLM7W
Lq7plF/33tcraGIJGlcUY5mlRWoOC/zrJ/3jsMNqjhUp/3HmtXUHlJpfpYml+bpminFyb0QOIckCAC4UMij7bC6tqCIxWJnvSAB7LZy5Urdc889ToeR9rZV1UmiCgRA/zINQ5FYTORAcC4efvhhSdKVV17Z7f5f/OIX+tSnPiVJqqyslGl2HmY6deqUPvvZz6qmpkaDBw/W7NmztWnTJk2ZMsWusOFB+dkB/Z+rx+k7z76l+57frUXTS5SbGej3n9PUHlbVyRYdbWxXY1tIDa1hNbSF1NweVkckqnAkpnAkqo7Er+FoLHF/VKFITOFoTIYkn2nINAz5TSP+e9OQz5B8pimfGf++zzTkN035TUN+n/WroYDPTHzPSH4v4DPkM61f44+zfm9dH/DF77fu8/sMGYofyovFYorG4l0Koon3hGgslnh/iH8vGo0l3y+iXe5L/j4WUzQaf46RaEyhxPMPJ3+NKRSNKhKJKRSNX2sYkiElPwtHY7H431PiMaZhyDSU/DuKRmNqD0cVikTVEY7KNAxlBkxlZviU6ffJNAy1hSNq7YioLRRRRySqzIBP2QGfsjJ8CvhMNXeE1dQWVmNbWC0dYWVn+FU4KEODszM0eFBArR0RHW9q14mmDh1ralfAZ6pscJbKCrNVVpgtQ9Kb1Q3ae7RJh+ta1doRUfmoAs0ZM0TTRuRpaE5QG3Yf0/Nv1eof+07oeFOHRg7O0pK5F+jG2WUaPChDT7xapTue2aGOSLzrRdBv6oXlH9DvKw5Lildqnmju0LeffVPf+8gMNbSF9OO/vqP//sdBtYfjj1n636/pz7e/X/nZ8X/nVSdb9LWn3tDf957o8d/uqMJsLZ5Zqg/NGqEJxbmnfb8tFNHT2w7rVy8f1M4jPR96m1icq0XTS7RoWqkmFOd028PoCEf1j/0n9NzOWr3wVq2q69t6/DM+Uj5C//efZyjgsUOuqYYkCAC4UDixMPD7vJUk6JrTiURjnnt+cKeSkhLV1tZ2u6+2tlZ5eXk9VoFI8T7cy5cvT37d0NDAQFIHvHbQmgdS4GwgADzFNKSIqATBuTmbnvUbNmzo9vUDDzzALDIMiE/MvUD/ufmgKk+26J4/vqkf3DjzrB8bjcZ08GSLaurbdKI5vgF+orlDJxKb4dX1rao82aJTLaEBfAZIVbtqGvXrV3qfVXToVKvuXbNL31+3W1OG5+v1xOGm0UOy1R6Oqrq+TXf+fodeevu4JOl7H5muf/vddj312mH96Y1qmYbUForvi8wcma9TLSFVnmzR15/Zrh/dXK6X9h7X//n1NtW1hOQ3DX30opEaV5SjqlMteuNQvXYeqVflyRY99Nd39NBf39GkklwtmFKsC4flyDQNvXbwlJ7dXq1jjfHq/4DP0BXjhmpYblDhaExvHmnQntpG7U7cHnzhbY0dOkiTS/OUneHTwZMteuNQXTJGScrO8GnOmEKNK8pRbmZAVSdb9NS2w3pq22E1tIX00McvUtDvG6D/IngvJEEAwIW8Ohi9awkoLbFgl7lz52rNmjXd7nv++ec1d+7cXh8TDAYVDAYHOjScQSwW61IJwlB0AP0nXpkaYy0CIOUF/T794MaZuumRzXpy6yFdPalI103vfdZMLBbTht3H9NgrlXr1wEnVnWWCoyA7oJK8TOVlBZSXGVB+VkCDgj5l+MxkVYZVaZHh66zkyEhUZMTUWVURiXbeoomvw9F3VVRE4xUmVnVF/NeYwtHOSotu9yUqTpJVGN2u7X5NNBZLVqWYRrwiwzQkn2HEf28q8b34/dbvjcTv44/tfJx1n/X8/VY1i/V3kqhmsSpSJCUrT2KKJe+3HpusQEn8ffkTf3aGP36LxeIVDG2hiFpDEUWi8c33rETlh9801BaKqjVxTXs4ouwMv3Iz/crNDCg7w6fm9rBOtXToZHNIp5o7lJXh09CcDA3NCWpoTlDt4agqT7ao6lSLqk62KByJaVJpriYW52pUYbZM09CWAye15eAp7apuVGsooguHDdJ100v1/gnDNKE4V2t3VOs/N8crLKwEyL8umKClV43Tm9UNWvyjl7Rh9zFJ0tThebrpklFqbAvrP/6yV/Wt8X+XM0bm68vXTNT7xg/VG4fq9dGHN+nZN6q1bkeNwok38Rkj8/Wjmy/SqCHZ3f7NtnSEtf6to/p9xRG9uOeodtU0aldN42n/tofnZ+rTV4zRRy8a2a0NlyTVtXTohbeOau2Oam18+7j2HW/WvuPN3a4ZmhPUgilFWjClWPMuHKrMQPckx8KpJVr62Gt64a2j+kPFEd14MQfrnEISBABcKOTVmSBG1yQIOw84N01NTdq7d2/y6/3796uiokKFhYUaNWqUVqxYocOHD+s///M/JUmf//zn9aMf/Uhf/epX9elPf1p/+ctf9MQTT+jZZ5916ingLByua9Wxxnb5TUPTR+Q7HQ4AD7HWI1GyIAA84NIxhfrCBy7Ujze8oxVPbVdxXqZmX9D9AEkoEtUfKo7okY37tLu2cyM46Dc1cnCWhgwKakhORvw2KKihORkqystU2eBslRVmDUibLaS2hVNLJMXfS+tbQ6clEG66ZJRuumSU9tQ2at2OGpWPGqwrxg+VJE0bka+PXjRST249JEn6cPkISdJt7xurz1wxRgdOtKixLaTpI/KT7admlhXoywsn6nt/3hVvsWZIH5tdpns+NPW0xIMkZWf4tXjmcC2eOVx1LR1au6NG2yrrtP9Es9rDUZWXFWjOmELNn1Lca5uqguwM/fPskfrn2SPV2BbS3/ceV019m5rawxqWG9RFowYnK0t6M39KsT45b7Qe2bhPbxyqJwniIJIgAOBCkWi8pDLgsUoQH0kQ9IMtW7boqquuSn5tta365Cc/qdWrV6u6ulqVlZXJ748ZM0bPPvusvvSlL+mHP/yhRo4cqZ/97GdauHCh7bHj7L1WWSdJmjI8r8cPNgBwrqzlFUsRAF7xxfkTtOmdE6qoqtNNP9msL84fr8vGDlEkGtNfdx/THyoO60hiXsGgDJ9uvnSU/mnmcE0dnsecApwX0zROS4B0NaE4t8d5HF++ZqL+vL1a4WhMi2cOT95vGIbGDB3U45/1+Q9cqOunl8rvM1SQlaGsjLP7jFCQnaH/eeko/c9LR53V9T3JzQzo2mm9V1mdyZTSPEnSrpqe547AHiRBAMCFrMHoPtNbC9Kuc9A5fIlzdeWVV56xH/fq1at7fMy2bdsGMCr0t22ViXkgZQXOBgLAc8wuw2gBwAsy/KZ+ddscfe13b+hPb1TrB8/tOe2aoTlBffqK0fr4nAuUn0VlB5xVkp+pP/yfK9QRjqo4L/OsH1dWmP3eF7nMpNJ4EmhXdaNisVi34eqwD0kQAHAh7w5G73w+EbIgAM5gW6IS5KILmAcCoH9ZyxGSIAC8JCfo13/cXK7Lxw3VU68dUm1Du9pCEc29cIjmTy7WginFVNfCVS4cluN0CLYYOzRHAZ+hxvawDte1auTg1EvkeAFJEABwobBHB6P7ujyfM53kB5De2kIR7TxSL0kqLyMJAqB/mcnBtA4HAgD9zDAM
3XzpKN18Hm1/APSvDL+pC4flxIezVzeSBHGIt/qsAIBHhD07GL3z91SCAOjNziMNCkViGjIoQ2WFWU6HA8BjrMpUDmQAAAA7TGYuiOO8tbsGAB5hJQgCHmuHZXQbjO5gIABcLTkPZNRgeuYC6Hdmsh2Ws3EAAID0MKkkPhfkrepGhyNJXyRBAMCFQomZID6PtcOSOp8Tpy8B9MaaB1I+qsDROAB4k8FgdAAAYKNJiUqQt6gEcQxJEABwIWsmSMD03su0ldeJsPEAoBedlSAFzgYCwJNMBqMDAAAbTS6NV4IcON6s1o6Iw9GkJ+/trgGAByQHo3usHZbU2YebFhQAelJT36Yj9W0yDWnmyAKnwwHgQZ0zQRwOBAAApIVhOUENGZShaEx6+ygtsZxAEgQAXCjs4XZYySQIWRAAPbCqQCaW5GlQ0O9wNAC8yKQdFgAAsJFhGJpUas0FoSWWE0iCAIALJdth+bz3Mk0LCgBnsq2qThKtsAAMHIPB6AAAwGajCrMlSTX17Q5Hkp68t7sGAB7g6UoQk3ZYAHqXnAdSVuBsIAA8i0oQAABgt4zEIddwNOpwJOmJJAgAuFDEmgnixSRIYuMhQhYEwLt0hKN641C9JOmiCwY7HA0Ar7KWVzGSIAAAwCZWp4+OCEkQJ5AEAQAX6hyM7r2Xaau6hY0HAO+2q6ZB7eGo8rMCGjNkkNPhAPCozkoQhwMBAABpw9rfCUdYgDjBe7trAOAB3q4Eif/KxgOAd9tWWSdJmlVWkGydBwD9LTkThMUIAACwSYYvvgAJUQniCJIgAOBCViWIF2eCGLTDAtCL1xLzQC4aRSssAAOHShAAAGA3qx0WSRBnkAQBABfyciWIj2GkAHphVYKUjypwNA4A3mYlQWjNCQAA7BLwJ2aChFl/OIEkCAC4UDgaPxngxXYwne2weOMH0Ol4U7sqT7ZIkmaWFTgbDABPM2jNCQAAbEYliLNIggCAC0Ui3q0EsRI7bDwA6KoiUQUyrihH+VkBZ4MB4GkmVakAAMBm1kwQ69Ar7EUSBABcyMszQdh4ANCTznkgBc4GAsDzzMSnYNYiAADALn4f7bCcRBIEAFzI+lDuN733Mp1sh0UpCIAuOueBMBQdwMDqnAnicCAAACBt0A7LWd7bXQMAD/B0JQjtsAC8SzgSVUVVnSSGogMYeAZVqQAAwGaBRDsskiDOIAkCAC7k6ZkgiY2HCFkQAAm7ahrVGoooN+jXhKJcp8MB4HEmg9EBAIDNMqgEcRRJEABwIU9XgiSeUozTlwASth6MzwMpv2BwsloMAAYK88kAAIDdOtthsf5wAkkQAHAhq0rC7/PeZmDnxoPDgQBwDSsJcvEFzAMBMPA4kAEAAOzmpx2Wo0iCAIALhaPxN0VvVoIk2mGx8QAgwUqCzCYJAsAGBgcyAACAzWiH5SySIADgMtFoLPmh3Gd4LwniM2lBAaBTdX2rDte1yjSkmWUFTocDIA10zgRhLQIAAOwR8NMOy0kkQQDAZbpWSPhN771MJzceOH4JQNJrB+skSZNK8pQT9DsbDIC0QGtOAABgN2smSEeYShAneG93DQBSXKTLJ3KfB2eC0IICQFe0wgJgNysJwkwQAABglwAzQRxFEgQAXKZrEsTvwZkgtMMC0NXWysRQ9NEkQQDYw6AdFgAAsJk1EyTMiVBHkAQBAJfp+obozcHo8V9phwWgLRTRzsP1kqSLRpEEAWCPZDssDmICAACb+K3B6LTDcgRJEABwmW7tsDw4GJ0+3AAsr1fVKRyNqSg3qJGDs5wOB0CaYDA6AACwm9UOq4N2WI4gCQIALhNOHEs0Dcn0ZCUI7bAAxFmtsGZfMDg5LwgABlrnTBCHAwEAAGnDaofFTBBnkAQBAJexKkH8pjdfoq2nRRIEwGsMRQfgAIMDGQAAwGaBRBIkGuveAQT28OYOGwCksHAk/mboxXkgEpUgAOJisZi2kgQB4IDOdljOxgEAANJHwN+5DU81iP1IggCAy1gnAryeBOE9H0hv+48361RLSBl+U1OH5zsdDoA0woEMAABgN3+XPR6SIPYjCQIALhP2eBLEel5sPADpzaoCmTkyXxl+lqQA7GO15oyxFgEAADax2mFJUijCGsRufOIEAJexkgN+jyZBrKfFxgOQ3qwkyEW0wgJgs86ZIA4HAgAA0obPNJKHQqkEsR9JEABwGa/PBDFohwVAnUmQiy8odDgSAOmGdlgAAMAJAV98DdIRZkPEbiRBAMBlrJkgXq0E8bHxAKS9+paQ3j7aJEm6aFSBs8EASDsMRgcAAE6wWmJRCWI/kiAA4DLhaPzN0OfzZhLE6sNNEgRIX69VxatAxgwdpCE5QYejAZBurEoQWnMCAAA7ZSSSIGFOYtiOJAgAuExnJYg3X6KTfbh50wfS1mvWPJBRzAMBYD8jWQnCWgQAANjHTzssx3hzhw0AUph1IsCrM0F8DCMF0p41D2Q2Q9EBOMBkLQIAABxAOyznkAQBAJfx+kwQk9OXQFoLR6KqqKqTJF08miQIAPuxFgEAAE7ISCZBWIPYjSQIALiMVQlinVL0GtNkMDqQznbVNKqlI6LcTL/GDctxOhwAaahzJojDgQAAgLRCJYhzSIIAgMtEEoPR/V4djJ7YeOA9H0hPW7vMAzE9WvEGwN2YTwYAAJwQ8MfXICRB7EcSBABcxnov9OpMEFpQAOmNeSAAnNa5FnE2DgAAkF4CtMNyDEkQAHCZZCWIR5MgVnInRhIESEskQQA4rXMwOmsRAABgn4BJOyynkAQBAJexZoJ4tRLEoB0WkLaq61t1uK5VpiHNKitwOhwAacpaYnEgAwAA2Il2WM4hCQIALhNJJEH8pjdfon2cvgTS1iv7T0qSpo3I16Cg3+FoAKSr5EwQliIAAMBGVjusjjBJELt5c4cNAFJYOOLtShBOXwLpy0qCXDK60OFIAKQz2mEBAAAnWEmQMCcxbEcSBABcprMSxJtJkGQ7LDYegLRjJUEuHUMSBIBzrCUWaxEAAGCnDB8zQZxCEgQAXMbrM0Gs58XBByC9nGzu0NtHmyRRCQLAWWZiLUIOBAAA2Mnvi69BaIdlP5IgAOAykWj8zdCrSRDraUXJggBp5dUD8SqQ8UU5KhyU4XA0ANKZwVoEAAA4IJCsBGENYjeSIADgMl6vBDFN+nAD6YhWWADcwmQwOgAAcECAdliOIQkCAC7j9ZkgbDwA6YkkCAC3SFalciADAADYKCPRDoskiP1IggCAy0SSlSDefIlODiMlCwKkjab2sHYeqZdEEgSA83yGNROEtQgAALAP7bCc480dNgBIYWGPV4Kw8QCkn60HTykak8oKs1San+V0OADSnEFVKgAAcEDATzssp5AEAQCXSVaC+LyZBLE2HiIkQYC08cr+E5KkS0cPcTgSAOjampO1CAAAsE/ApB2WU0iCAIDLeL0ShJkg6A8PPfSQRo8erczMTM2ZM0evvPLKGa9/8MEHNXHiRGVlZamsrExf+tKX1NbWZlO0eHX/KUnSpWMGOxwJAHSdCeJsHAAAIL0wGN05JEEAwGUi0fiboc+
jSZDEez7tsHDOfvOb32j58uW6++679dprr2nmzJlauHChjh492uP1jz32mL72ta/p7rvv1ltvvaWf//zn+s1vfqOvf/3rNkeentpCEVVU1UmSLh1DJQgA55kmrTkBAID9rHZYHWHWIHYjCQIALmNVglizM7wm2Q6L45c4R/fff78++9nP6tZbb9WUKVO0atUqZWdn69FHH+3x+k2bNunyyy/XLbfcotGjR+uaa67RzTff/J7VI+gfr1fVqSMS1bDcoEYPyXY6HACQkawEYS0CAADsY1WChKNUgtiNJAgAuEwk4u2ZIFaFCzkQnIuOjg5t3bpV8+fPT95nmqbmz5+vzZs39/iYefPmaevWrcmkx759+7RmzRpdd911vf6c9vZ2NTQ0dLvh3Lx64KQk6dLRhckkKAA4idacAADACRk+ZoI4xe90AACA7rw/EyT+a5SdB5yD48ePKxKJqLi4uNv9xcXF2rVrV4+PueWWW3T8+HFdccUVisViCofD+vznP3/GdlgrV67UPffc06+xp6t/7E8kQcYUOhwJAMSZVIIAAAAH+H20w3IKlSAA4DLWB3Kf6c2X6M7Tl7zpwx4bNmzQvffeqx//+Md67bXX9NRTT+nZZ5/Vt7/97V4fs2LFCtXX1ydvVVVVNkbsHeFIVFsPWkPRSYIAcAdrLcJSBAAA2InB6M6hEgQAXMb7lSC0oMC5Gzp0qHw+n2pra7vdX1tbq5KSkh4fc+edd+oTn/iEbrvtNknS9OnT1dzcrM997nP6xje+IbOHhGMwGFQwGOz/J5Bmdh5pUEtHRHmZfk0sznU6HACQ1DmfjAMZAADATgHaYTnGm8eMASCFJWeCeDYJEv81wsYDzkFGRoZmz56t9evXJ++LRqNav3695s6d2+NjWlpaTkt0+Hw+SVKMf4cDatM7JyRJl44ZItOjr2kAUk9nOyxn4wAAAOklg0oQx1AJAgAu4/VKECu5w+YzztXy5cv1yU9+UhdffLEuvfRSPfjgg2pubtatt94qSVqyZIlGjBihlStXSpIWL16s+++/X+Xl5ZozZ4727t2rO++8U4sXL04mQzAwNr1zXJJ0+bghDkcCAJ1ozQkAAJzQ2Q6LNYjdSIIAgMtEovETAV6tBEm2oODgA87RTTfdpGPHjumuu+5STU2NZs2apbVr1yaHpVdWVnar/LjjjjtkGIbuuOMOHT58WMOGDdPixYv13e9+16mnkBbawxG9eiA+FH3ehUMdjgYAOllLLA5kAAAAOwX8VII4hSQIALiM1ytBrNOXtMPC+Vi2bJmWLVvW4/c2bNjQ7Wu/36+7775bd999tw2RwbKtsk5toaiG5mRoQnGO0+EAQBIHMgAAgBMCJjNBnMJMEABwmUjU2zNBEtWfnL4EPM6aBzL3wqHJDUcAcAPaYQEAACd0VoKwBrEbSRAAcJlwMgnizZdoazM0wjRSwNM27U3MA7mQeSAA3IXB6AAAwAnWTJCOMJUgdvPmDhsApLCIx9th+ZKnLx0OBMCAaW4Pq6KqThLzQAC4j1UJQlUqAACwU8AXX4OE6clpO5IgAOAyXm+HZRW40IIC8K5X9p9UOBrTyMFZGjUk2+lwAKAbI1kJwloEAADYJ8NHOyynkAQBAJdJVoL4PJoEoQ834Hkv7jkmSXrfeKpAALiPSVUqAABwgNUOK0Q7LNuRBAEAl7HKIj1bCWJtPPCeD3jWht1HJUkfmFDkcCQAcDqqUgEAgBOsw64dETZE7EYSBABcxuszQawkSISNB8CTDhxv1oETLfKbhi4fx1B0AO7TORPE4UCQklauXKlLLrlEubm5Kioq0g033KDdu3e/5+N++9vfatKkScrMzNT06dO1Zs0aG6IFALhJZzsskiB2IwkCAC4TTs4E8eZLtJXbYRgp4E1WK6yLRw9WbmbA4WgA4HQGrTlxHl588UUtXbpUL7/8sp5//nmFQiFdc801am5u7vUxmzZt0s0336zPfOYz2rZtm2644QbdcMMN2rFjh42RAwCcZrXDisY6D8DCHn6nAwAAdOf5ShCTPtyAl9EKC4DbmQxGx3lYu3Ztt69Xr16toqIibd26Ve9///t7fMwPf/hDXXvttfrKV74iSfr2t7+t559/Xj/60Y+0atWqAY8ZAOAOAX/nYddQJCqf6XMwmvTizWPGAJDCwpH4B3LTq0kQqx0WWRDAc9pCEW3ed0KSdOXEYQ5HAwA9YzA6+lN9fb0kqbCwsNdrNm/erPnz53e7b+HChdq8eXOvj2lvb1dDQ0O3GwAgtQV8nfs8tMSyF0kQAHAZr1eCJKo/aYcFeNA/9p9UWyiqkrxMTSrJdTocAOgRrTnRX6LRqL74xS/q8ssv17Rp03q9rqamRsXFxd3uKy4uVk1NTa+PWblypfLz85O3srKyfosbAOCMgNm1EoR1iJ3OKQny0EMPafTo0crMzNScOXP0yiuvnPH6Bx98UBMnTlRWVpbKysr0pS99SW1tbecUMAB4nTUw3OfRJIjBYHTAs9btjG/mXDVpWPL/dQBwG4NKEPSTpUuXaseOHXr88cf7/c9esWKF6uvrk7eqqqp+/xkAAHuZppHc66ESxF59ngnym9/8RsuXL9eqVas0Z84cPfjgg1q4cKF2796toqLTez8/9thj+trXvqZHH31U8+bN0549e/SpT31KhmHo/vvv75cnAQBe4vVKkGQLCt7vAU8JR6JatyOeBFk0rdThaACgdyaD0dEPli1bpj/96U/auHGjRo4cecZrS0pKVFtb2+2+2tpalZSU9PqYYDCoYDDYL7ECANwj4DMUicbUEWZTxE59rgS5//779dnPfla33nqrpkyZolWrVik7O1uPPvpoj9dv2rRJl19+uW655RaNHj1a11xzjW6++eb3rB4BgHQVTmQHvFoJ4mPjAfCkV/af1InmDhVkBzT3wiFOhwMAveocjO5sHEhNsVhMy5Yt09NPP62//OUvGjNmzHs+Zu7cuVq/fn23+55//nnNnTt3oMIEALhUINEjPMxCxFZ9SoJ0dHRo69at3QZ6maap+fPn9zrQa968edq6dWsy6bFv3z6tWbNG11133XmEDQDeFYlYlSDeHNvUufHAGz7gJc9ur5YkLZxSklzYA4AbWZUgzATBuVi6dKl+9atf6bHHHlNubq5qampUU1Oj1tbW5DVLlizRihUrkl/ffvvtWrt2re677z7t2rVL3/zmN7VlyxYtW7bMiacAAHBQRuKzEu2w7NWndljHjx9XJBLpcaDXrl27enzMLbfcouPHj+uKK65QLBZTOBzW5z//eX3961/v9ee0t7ervb09+XVDQ0NfwgSAlGadBvBqJYhp0ocb8JpINJacB3LdDFphAXA3gwMZOA8PP/ywJOnKK6/sdv8vfvELfepTn5IkVVZWyuxyoGnevHl67LHHdMcdd+jrX/+6xo8fr2eeeeaMw9QBAN5kHRijHZa9+jwTpK82bNige++9Vz/+8Y81Z84c7d27V7fffru+/e1v68477+zxMStXrtQ999wz0KEBgCslZ4L4PJoEoR0W4Dmv7D+p403xVljzaIUFwO
WYT4bzcTYVRBs2bDjtvhtvvFE33njjAEQEAEgl1l4PlSD26lMSZOjQofL5fH0a6HXnnXfqE5/4hG677TZJ0vTp09Xc3KzPfe5z+sY3vtHtdIRlxYoVWr58efLrhoYGlZWV9SVUAEhZnq8EsU5fUgoCeMZvt1RJohUWgNTAgQwAAOCUznZYrEPs1KdPqRkZGZo9e3a3gV7RaFTr16/vdaBXS0vLaYkOn88nqfcTFMFgUHl5ed1uAJAurEoQa4C419AOC/CWY43t+uMbRyRJt8wZ5XA0APDerAMZ5EAAAIDdkoPRqQSxVZ/bYS1fvlyf/OQndfHFF+vSSy/Vgw8+qObmZt16662S4gPARowYoZUrV0qSFi9erPvvv1/l5eXJdlh33nmnFi9enEyGAAA6hRO9GbxbCRJ/XhGyIIAn/PqVSoUiMc0qK9DMsgKnwwGA92RQCQIAABwS8MfXIR0kQWzV5yTITTfdpGPHjumuu+5STU2NZs2apbVr1yaHpb97ANgdd9whwzB0xx136PDhwxo2bJgWL16s7373u/33LADAQ6z+1F6dCWJVuJxNP2UA7tYRjupXLx+UJN16+WhngwGAs2QyGB0AADgkQDssR5zTYPRly5Zp2bJlPX7v3QPA/H6/7r77bt19993n8qMAIO14vRLESG48OBsHgPP35x3VOtrYrmG5QS2aVup0OABwVqzWnORAAACA3QKmlQShEsROTK4EABeJRmPJ5IDf9OZLdLIdFjsPQEqrbw3p3jVvSZI+cdkFyvB78zULgPdQCQIAAJxitcMiCWIvPq0CgIt0TQx4tRLEZ9IOC/CClWveUm1Du8YMHaTPvX+s0+EAwFnrnAnicCAAACDtWO2wOsIkQexEEgQAXKTrsHC/R5Mg1tNiMDqQuta/VavHX62SJP3fj85QZsDncEQAcPZMBqMDAACHWEmQMHsitjqnmSAAgIHR9U3Qq5UgnL4EUlc0GtPDL76j+57bLUlaMvcCXTqm0OGoAKBvrCUWORAAAGC3DB8zQZxAEgQAXCQS8X4liJXc4fQl0tk7x5pUdbIl+XW3/xve9b9GrMsd7/7fpuvX7/4/qmvLudO/9+6Iev8ZbeGITjaH9HZto/729nEdrmuVJH3kohH6+nWT3/0HAYDrUQkCAACcEvDF1yG0w7IXSRAAcJFwtPNN0KuVIMlhpJSCII098WqVfrJxn9NhnJPsDJ/uXjxFH7u4LFnZBQCpxGAwOgAAcIg/WQnCOsROJEEAwEWsORmmIc9uLpq0wwJUlJepqcPzkl+/+393Q0aP3+t22bseZPTyrXe/knR9ben9MZ1fBPyGCgcFNTw/U3MvHKI5Y4YoK4MZIABSF2sRAADglADtsBxBEgQAXCSSOJHoN02HIxk4ZqIUJMLpS6Sxz1wxRp+5YozTYQBAWrKSIDHWIgAAwGYZiXZYYZIgtvLuLhsApKBwohzSq62wpK7DSNl4AAAA9ku25mQpAgAAbGZVgnTQDstWJEEAwEWsdlheHYouST5aUAAAAAcZDEYHAAAOsWaCMBjdXiRBAMBFwonMgM/n3SSItfEQIQsCAAAckKwEYS0CAABsZh165TCGvUiCAICLpEUlSJfnRkssAABgt86ZIA4HAgAA0k5yTiqHMWxFEgQAXCQcjZdDpsNMEImWWAAAwH4m7bAAAIBDrBbhEdYhtiIJAgAu0lkJ4t2XZ6sdlsTJBwAAYD+DwegAAMAhiZEgtOW0mXd32QAgBVkzQTycA+lW5cIJTAAAYDeTXtwAAMAh1jokTBLEVh7eZgOA1JMOlSDd22Hxpg8AAOxlrUVYhgAAALslB6OTBLGVd3fZACAFWUkQb88E6VoJ4mAgAAAgLTETBAAAOMVkJogjSIIAgIt0VoKkSxKEN30AAGAvaynC5gMAALCbdeiVGan2IgkCAC4STotKkM7fU/4JAADsZh3IiMWkGIkQAABgIx+zyRxBEgQAXCQSjUrydiVI98HoDgYCAADSUteqVPYfAACAnZLtsNgQsRVJEABwkXDE+5UgRpeNB970AQCA3bpVpZIFAQAANupsh+VwIGmGJAgAuEjnTBBvvzxbb/q0oAAAAHYzqUoFAAAO8Rm0w3KCt3fZACDFWDNBPJ4DSZ7AZOMBAADYrWs7LDYgAACAnUwGozvC49tsAJBa0qUSxGqJFWHjAQAA2KxrOyyWIgAAwE6+xHYPBzHs5e1dNgBIMVYliJdngkhdyj85+QAAAGxGJQgAAHAKg9GdQRIEAFwkmqwE8XYSpLMdFm/6AADAXgaD0QEAgEN8tMNyBEkQAHCRdKkEMZODwBwOBAAApJ3ulSAOBgIAANIOg9GdQRIEAFwkEo1Kkvw+jydBTN70AQCAM7omQWKsRQAAgI0YjO4MkiAA4CKdlSDefnlOtsPiTR8AANjM7NYOy7k4AABA+vExE8QR3t5lA4AUE0mTmSA+k3ZYAADAGQaD0QEAgEN8ic4fEdYgtiIJAgAuki4zQQx6YAIAAAclq1JZiwAAABt1VoI4HEiaIQkCAC5iVYL4DG8nQayNB8o/AQCAE6y5IORAAACAnZKdMdgPsRVJEABwkXAkkQTx+GB0HxsPAADAQSZVqQAAwAHWGoR2WPYiCQIALmK9CXp9JojBmz4AAHCQkWyH5WwcAAAgvVAJ4gySIADgIpFovCmk12eCdA5G500fAADYL1kJwgYEAACwkS+xG8+hUHuRBAEAF7EGo3u9EsR6ejHe9HGOHnroIY0ePVqZmZmaM2eOXnnllTNeX1dXp6VLl6q0tFTBYFATJkzQmjVrbIoWAOA2nWsRZ+MAAADpJdkOi4MYtvI7HQAAoFPEmgliejtH3fmm73AgSEm/+c1vtHz5cq1atUpz5szRgw8+qIULF2r37t0qKio67fqOjg4tWLBARUVFevLJJzVixAgdPHhQBQUF9gcPAHAFZoIAAAAn0A7LGSRBAMBF0qYShHZYOA/333+/PvvZz+rWW2+VJK1atUrPPvusHn30UX3ta1877fpHH31UJ0+e1KZNmxQIBCRJo0ePtjNkAIDLdM4EYS0CAADsw2B0Z3j7qDEApBirHNLrM0Gsp8fJB/RVR0eHtm7dqvnz5yfvM01T8+fP1+bNm3t8zB/+8AfNnTtXS5cuVXFxsaZNm6Z7771XkUik15/T3t6uhoaGbjcAgHd0HshwOBAAAJBWrP0eOmPYiyQIALhI2lSCGGw84NwcP35ckUhExcXF3e4vLi5WTU1Nj4/Zt2+fnnzySUUiEa1Zs0Z33nmn7rvvPn3nO9/p9eesXLlS+fn5yVtZWVm/Pg8AgLOstQjzyQAAgJ18dMZwBEkQAHCRSDR+FMBMmyQIb/oYeNFoVEVFRXrkkUc0e/Zs3XTTTfrGN76hVatW9fqYFStWqL6+PnmrqqqyMWIAwEBLVqWyFAEAADZiMLozmAkCAC6SNpUgiRQ8PTDRV0OHDpXP51NtbW23+2tra1VSUtLjY0pLSxUIBOTz+ZL3TZ48WTU1Nero6FBGRsZpjwkGgwoGg/0bPADANQwOZAAAAAcwGN0ZVIIAg
ItE02QmiI8WFDhHGRkZmj17ttavX5+8LxqNav369Zo7d26Pj7n88su1d+9eRaOdTVf37Nmj0tLSHhMgAADvMxmMDgAAHGAdeg2TBLEVSRAAcJF0qQQxDAaB4dwtX75cP/3pT/XLX/5Sb731lr7whS+oublZt956qyRpyZIlWrFiRfL6L3zhCzp58qRuv/127dmzR88++6zuvfdeLV261KmnAABwWOdMEIcDAQAAacVqf05nDHvRDgsAXMTqCenzeTtHzelLnI+bbrpJx44d01133aWamhrNmjVLa9euTQ5Lr6yslGl2/j9UVlamdevW6Utf+pJmzJihESNG6Pbbb9e//du/OfUUAAAOYz4ZAABwgtUZg3ZY9iIJAgAuki6VIFa7L9ph4VwtW7ZMy5Yt6/F7GzZsOO2+uXPn6uWXXx7gqAAAqcJgMDoAAHAAM1Kd4e2jxgCQYiJpMhOEdlgAAMBJVIIAAAAn+Lq05ORgqH1IggCAi6RNJQgbDwAAwEHWUovNBwAAYKeuh14jlKTahiQIALhIJBovjfB6JYhV/kkSBAAAOKGzEsThQAAAQFoxuyZB2BOxDUkQAHCRcCQ92mHRggIAADgpOROELAgAALCR1RlDkqK0CLcNSRAAcJFImrTDSiZBeMMHAAAOoBIEAAA4wUcliCNIggCAi1hvgD7T2y/P1ns+b/gAAMAJZnIoKWsRAABgH9NgJogTvL3LBgApJl0qQayTD2w8AAAAJyTbYbEUAQAANupaCUJbTvuQBAEAF0mXmSAGLSgAAICDmE8GAACc0HW7h+4Y9iEJAgAuki6VIMl2WGRBAACAA6zOoyRBAACAnQzDSO6JUAliH5IgAOAi4cSkcK9XgtAOCwAAOKlzJojDgQAAgLRj7YmESYLYhiQIALhIshLE5+0kiNUOi0oQAADgBIN2WAAAwCFWEoQ9EfuQBAEAF7FOAVinE73Kx0wQAADgIJPB6AAAwCE+DmPYjiQIALiINRg94PP2y3PnxgNv+AAAwH4MRgcAAE4xqQSxnbd32QAgxVgzQbzeDouNBwAA4CTrQAbzyQAAgN2sdljsidiHJAgAuEgoUQniN7398myatMMCAADOMWjNCQAAHOJLzkl1OJA04u1dNgBIMaHEO2DA85Ug8V8p/QQAAE6gNScAAHAK7bDsRxIEAFwkfWaCxN/waUEBAACcYFIJAgAAHMJgdPt5e5cNAFJMKF1mgtAOCwAAOIgDGQAAwCk+KkFsRxIEAFwiEo3J+hwe8PpMENphAQAABxm0wwIAAA6xtnwirENs4+1dNgBIIaEuE7G8Xgni4/QlAABwULIdFgNJAQCAzZLtsDgYahuSIADgEuEub35enwli0IcbAAA4iMHoAADAKQxGt5+3d9kAIIWEu1aCmB6vBEk8vzBv+AAAwAGdM0EcDgQAAKQdqxKEdlj2IQkCAC4RinS++fk8ngSxKl26tgADAACwS2dVKpsPAADAXtaeD2057UMSBABcIpx49wv4jOQHc6/K8MfffjrCvOMDAAD7dbbDcjYOAACQfqyK1DBZENuQBAEAlwgnKkH8pvdfmjMSg9+pBAEAAE4wqQQBAAAO8ftYh9jN+zttAJAirISA9WboZVSCAAAAJ1lnTmJsPgAAAJtZhzE4F2ofkiAA4BLWkHBrXoaXZSSeYwfv+AAAwAGdM0EcDgQAAKQdayZIhIWIbby/0wYAKcKqivB7fCi6JAWoBAEAAA6iHRbOx8aNG7V48WINHz5chmHomWeeOeP1GzZskGEYp91qamrsCRgA4Co+1iG2IwkCAC5BJQgAAIA9rDMnnMDEuWhubtbMmTP10EMP9elxu3fvVnV1dfJWVFQ0QBECANzMasvJOsQ+fqcDAADEhRMJgUAazQRhMDoAAHCCVQnCAUyci0WLFmnRokV9flxRUZEKCgr6PyAAQEqx2mFRCWIf7x83BoAUEYrE3/z86VQJQjssAADggEQOhM0H2GrWrFkqLS3VggUL9Pe///2M17a3t6uhoaHbDQDgDZ2D0VmH2MX7O20AkCLC0fSZCZLBTBAAAOAgk8HosFFpaalWrVql3/3ud/rd736nsrIyXXnllXrttdd6fczKlSuVn5+fvJWVldkYMQBgIDEY3X60wwIAlwhH0mcmSCA5E4Q3fAAAYD8GksJOEydO1MSJE5Nfz5s3T++8844eeOAB/dd//VePj1mxYoWWL1+e/LqhoYFECAB4BOsQ+5EEAQCXsOZj+NNoJkhHOOJwJAAAIB1ZA0ljbD7AIZdeeqleeumlXr8fDAYVDAZtjAgAYBczWQnicCBpxPvHjQEgRYQTZZAB0/svzVYlSIhKEAAA4ACDdlhwWEVFhUpLS50OAwDgAKsSJMJhDNtQCQIALpFOlSBBZoIAAAAHmQxGx3loamrS3r17k1/v379fFRUVKiws1KhRo7RixQodPnxY//mf/ylJevDBBzVmzBhNnTpVbW1t+tnPfqa//OUveu6555x6CgAAB1kzQaKcxrANSRAAcAlrJog/DWaCJNthUfsJAAAcwGB0nI8tW7boqquuSn5tze745Cc/qdWrV6u6ulqVlZXJ73d0dOhf//VfdfjwYWVnZ2vGjBl64YUXuv0ZAID0YTIY3XYkQQDAJcLReEIgYHq/EiTZDotKEAAA4AArCcJMEJyLK6+88oz/dlavXt3t669+9av66le/OsBRAQBShZ8kiO28f9wYAFJEKFkJ4v0kiFUJ0k4lCAAAcIBBOywAAOAQk5kgtiMJAgAu0TkTxPsvzYFEoqcjHOUEJgAAsB3tsAAAgFOsbR8qQezj/Z02AEgR1kyQdGiHFfT5kr8P86YPAABsxmB0AADgFAaj248kCAC4RMiaCZIGlSBWOywpXg0CAABgp86ZIA4HAgAA0g7tsOzn/Z02AEgR4eRMEO+/NAe6zD0JMRcEAADYzDA4gQkAAJxBJYj9vL/TBgApIhyxKkG83w7L7zOTbSioBAEAAHbrbIflbBwAACD9UAliP5IgAOASocSncL+ZHi/NVkusdpIgAADAZp2D0dl8AAAA9rIqQWiMYZ/02GkDgBSQTpUgUufsE9phAQAAu1mVIDGSIAAAwGbJdlisQ2xDEgQAXCKUnAmSHkmQYKISpIMkCAAAsJlBGwoAAOCQZDss+nLahiQIALhEOBpPBqRLOyyrEoSZIAAAwG6dJzAdDgQAAKSdxHYISRAbpcdOGwCkgHCiEiRd2mFZM0FohwUAAOyW7MUdYfMBAADYy8dsMtuRBAEAl+hsh5UeL80ZPgajAwAAZ/gTSZAwJzABAIDNTJN2WHZLj502AEgBne2w0qMSpHMwOm/6AADAXslKkCiHMQAAgL18zASxHUkQAHAJqy1UIF0qQfzMBAEAAM6gEgQAADjF5yMJYrdz2ml76KGHNHr0aGVmZmrOnDl65ZVXznh9XV2dli5dqtLSUgWDQU2YMEFr1qw5p4ABwKs622GlRyVIBoPRAQCAQ3yJdQibDwAAwG7JShBmgtjG39cH/OY3v9Hy5cu1atUqzZkzRw8++KAWLlyo3bt3q6io
6LTrOzo6tGDBAhUVFenJJ5/UiBEjdPDgQRUUFPRH/ADgGeE0rQRhMDoAALAblSAAAMApVlvOKOsQ2/Q5CXL//ffrs5/9rG699VZJ0qpVq/Tss8/q0Ucf1de+9rXTrn/00Ud18uRJbdq0SYFAQJI0evTo84saADzI+hAeSJdKENphAQAAh9CLGwAAOMVMVoI4HEga6dNx446ODm3dulXz58/v/ANMU/Pnz9fmzZt7fMwf/vAHzZ07V0uXLlVxcbGmTZume++9V5FIpNef097eroaGhm43APA6qyLCb6ZHJYiV7OmgEgQAANjMRyUIAABwCJUg9uvTTtvx48cViURUXFzc7f7i4mLV1NT0+Jh9+/bpySefVCQS0Zo1a3TnnXfqvvvu03e+851ef87KlSuVn5+fvJWVlfUlTABISeFIulWC+CRRCYJz09f5ZJbHH39chmHohhtuGNgAAQCu5k8OJGUdAgAA7GWaVKTabcCPG0ejURUVFemRRx7R7NmzddNNN+kb3/iGVq1a1etjVqxYofr6+uStqqpqoMMEAMeFEm9+VIIAZ2bNJ7v77rv12muvaebMmVq4cKGOHj16xscdOHBAX/7yl/W+973PpkgBAG7lY/MBAAA4hMHo9uvTTtvQoUPl8/lUW1vb7f7a2lqVlJT0+JjS0lJNmDBBPp8ved/kyZNVU1Ojjo6OHh8TDAaVl5fX7QYAXmcNRvenSSVI0BqMTiUI+qjrfLIpU6Zo1apVys7O1qOPPtrrYyKRiD7+8Y/rnnvu0dixY22MFgDgRn6SIAAAwCG+xI487bDs06ckSEZGhmbPnq3169cn74tGo1q/fr3mzp3b42Muv/xy7d27V9EuZcZ79uxRaWmpMjIyzjFsAPCeznZY6VEJkpF4nlSCoC/OZT6ZJH3rW99SUVGRPvOZz9gRJgDA5XyJyltmggAAALuZVILYrs87bcuXL9dPf/pT/fKXv9Rbb72lL3zhC2pubtatt94qSVqyZIlWrFiRvP4LX/iCTp48qdtvv1179uzRs88+q3vvvVdLly7tv2cBAB4QilqD0dOjEsRK9jATBH1xLvPJXnrpJf385z/XT3/607P+Oe3t7WpoaOh2AwB4B5UgAADAKbTltJ+/rw+46aabdOzYMd11112qqanRrFmztHbt2uRmRGVlpcwu/ezLysq0bt06felLX9KMGTM0YsQI3X777fq3f/u3/nsWAOABViWIP10qQfxUgmDgNTY26hOf+IR++tOfaujQoWf9uJUrV+qee+4ZwMgAAE6yNh+s9RcAAIBdrHVIlEoQ2/Q5CSJJy5Yt07Jly3r83oYNG067b+7cuXr55ZfP5UcBQNqwZoIE0mQmSDIJQiUI+qCv88neeecdHThwQIsXL07eZ7Xo9Pv92r17ty688MLTHrdixQotX748+XVDQ4PKysr662kAABxGJQgAAHBKsh0W6xDbnFMSBADQ/zqsShAzPSpBrHZYISpB0Add55PdcMMNkjrnk/V0QGPSpEnavn17t/vuuOMONTY26oc//GGviY1gMKhgMNjv8QMA3CFZCRJlHQIAAOyVrARhGWIbkiAA4BLWh/B0qQQJUgmCc7R8+XJ98pOf1MUXX6xLL71UDz744GnzyUaMGKGVK1cqMzNT06ZN6/b4goICSTrtfgBA+vD7OIEJAACcYVWCcBjDPiRBAMAl0m0mSHIwOpUg6KO+zicDAODdfIn3iTBJEAAAYLNkW06WIbYhCQIALhFK25kgvOuj7/o6n6yr1atX939AAICUwkwQAADglM52WKxD7MIxSQBwCeskYiBNKkEyqAQBAAAO6ZwJwuYDAACwl8lhDNulx04bALhcLBZLvvlZJxO9LpCsBIk4HAkAAEg3PjYfAACAQ3yJmSDRGOsQu5AEAQAXCHVpBJkuM0GsSpAQTTABAIDNkpUgVKQCAACbWSMsOYxhn/TYaQMAlwtHOz+Ap89MkPjz7Aiz+QAAAOzFTBAAAOAUqxIkQiWIbUiCAIALdKsEMdPjpTnD55PUORAeAADALsl2WGw+AAAAmzEY3X7psdMGAC7XtRVD+lSCWDNBSIIAAAB7WYdOqAQBAAB2MzmMYTuSIADgAuHEB3Cfacgw0iMJYiV72kmCAAAAmyVngpAEAQAANksORmc7xDYkQQDABayWUFZ/6nRgVYLQDgsAANjNWnPFYrSiAAAA9vIxm8x2JEEAwAXCiZkgAV/6vCxnJJ5rB0kQAABgM1+X9qNUgwAAADuZDEa3XfrstgGAiyUrQdJkHojETBAAAOCcrtW3nMIEAAB2YjC6/UiCAIALhBKVINaQznRAOywAAOAUn9m1EoS1CAAAsI/VBIRKEPukz24bALiY9eE7I40qQazWX6FIjNMPAADAVl0PnlAJAgAA7ORLrEMiEdYgdiEJAgAukKwESaeZIP7O5xriBCYAALBRl0IQZoIAAABb+ZgJYrv02W0DABcLp+NMkC4JH+aCAAAAOxmGkZwLQiUIAACwk1WQyhrEPiRBAMAFrBOIgXSaCUISBAAAOMiaC0IlCAAAsFNyMDqVILZJn902AHCxUBpWgphm5wnMEH0wAQCAzawNCPpxAwAAOyXbYXEQwzYkQQDABcJpOBNE6hyOTiUIAACwW2clCOsQAABgHzNZCSLFqAaxRXrttgGAS1kfvgNm+lSCSJ3D0TsiEYcjAQAA6YaZIAAAwAlWJYgUT4Rg4JEEAQAXCCUrQdI0CRLmXR8AANjLl5jFxkwQAABgJ7PLAVgOY9iDJAgAuECyEiTN2mFZw9E7IrShAAAA9qISBAAAOMFndq0EYR1ih/TabQMAl0pWgqRpO6wQSRAAAGAzH0kQAADggK7tsFiH2IMkCAC4gJUESL/B6PE3fgajAwAAu1ltSGmHBQAA7GR22fqJUAlii/TabQMAlwonKkECaTsThCQIAACwF5UgAADACd0Go7MOsQVJEABwAasShJkgAAAA9rDakFqz2QAAAOzgYzC67dJrtw0AXMpqw+A30+tl2Ur6UAkCAADs5kusu9h8AAAAdjIMQ1YxCOsQe6TXbhsAuFQ4WQlCOywAAAA7dFaCsPkAAADsZa1DmAliD5IgAOACocRMEH+6JUESlSAh2mEBAACbJWeCRNh8AAAA9jINZpPZiSQIALiA1Ys63dphJStBSIIAAACbUQkCAACcYh3GYDSZPdJrtw0AXCqcOIFIOywAAAB7JCtBSIIAAACb+QzaYdmJJAgAuEBnO6z0ellODkanEgQAANjMl6wEYR0CAADsZXIYw1bptdsGAC5lffgOmFSCAAAA2IFKEAAA4JRkOywqQWxBEgQAXCBdK0EYjA4AAJzCTBAAAOAUBqPbK7122wDApcKJJICfmSAAAAC28JnxdQibDwAAwG7WGVjWIfYgCQIALmBVQgTM9HpZtipBSIIAAAC7UQkCAACcYg1Gpx2WPdJrtw0AXCoUtdphpVclCIPRAQCAU3yJdVeUJAgAALAZg9HtRRIEAFygsx1Wer0sl+QHJUl7apscjgQAAKQbKkEAAIBTGIxur/TabQMAlwo
nBqNnpFklyLwLh0qSKqrq1NAWcjgaAACQTnzJE5hUpAIAAHv5koPRHQ4kTZAEAQAXSLbDSrOZIGWF2Ro9JFuRaEz/2HfS6XAAAEAaoRIEAAA4hXZY9kqv3TYAcKnOdljpVQkiSZePi1eDvPT2MYcjAQAA6cSXOHwSibD5AAAA7NVZCcI6xA4kQQDABax2WIE0mwkiSe8bH0+C/G3vcYcjAQAA6YRKEAAA4JRkW05mgtgi/XbbAMCFQole1NaH8XTy/7d37/FR1dfex79zycwkkIRLTMIllZuKCAIGiUEptU9eTb2gtlp51AOUKl7pxTxtFW3BamuoVQ+nFk1FKdqqoPVSj3CwNpVj0VgqEKWCKHJVSAAvSUjIbeb3/JHMhADRJOzZezLzeb86ryZ79k5WfgKzstes38ofliG3S9q2v1Z7Pj/kdDgAACBBeNiGAgAAOCQyGJ08xBYUQQAgBiRyJ0h6SpLGDO4jSVrzAd0gAADAHnSCAAAApzATxF5epwMAAEhNCTwTRJImj8jQ27s/19znN2rZv3YpOz0gSXKpdT0OW5bwhy5Xz1urXj6PFlx6utNhAAAASR5P+OZDyOFIAABAognf/mE7LHtQBAGAGBB+B6LXnXidIJL0nQmD9fK7Ffpg30Gt3/W50+FETZ+UJIogAADEiPBAUjpBAACA3dgOy14UQQAgBjS3doIkJWgnyIn9e+mVoin66LM6/XPbp6prbFY4DQi/KcIc9u6Inpoi+L0ep0MAAACtvGxDAQAAHOJ2MRjdThRBACAGNLXOBPEm4EyQww3um6LBuSlOhwEAABKAp7UDl04QAABgNw9vxrBVYt9tA4AYEZkJ4k7MThAAAAC7hWexBYPcfAAAAPaKbIdFJ4gtKIIAQAwIvwMxKcE7QQAAAOwSvvlAJwgAALBbZDuskMOBJAjutgFADGhK8JkgAAAAdvPyDkwAAOAQBqPbiyIIAMSAtiII/ywDAADYgU4QdNdrr72mqVOnauDAgXK5XHrhhRe+9JrVq1frjDPOkN/v14gRI7R06dKoxwkAiF0MRrcXd9sAwGHBkFF9U0sRJMXncTgaAACAxOCNDCRlHwp0TW1trcaOHatFixZ16vzt27frggsu0Lnnnqvy8nL96Ec/0jXXXKOXX345ypECAGJV+D2wDEa3h9fpAAAg0R1qCkY+7uXnn2UAAAA7eNwtdx+aGYyOLjrvvPN03nnndfr8kpISDR06VPfdd58k6dRTT9WaNWv0n//5nyosLIxWmACAGMZgdHvRCQIADqtraJYkuV2S38s/ywAAAHZo6wTh5gOiq6ysTAUFBe2OFRYWqqysrMNrGhoaVF1d3e4BAIgf4e2weDOGPbjbBgAOq21s6QTp5fPK5WIwOgAAgB2YCQK7VFRUKCsrq92xrKwsVVdX69ChQ8e8pri4WOnp6ZFHTk6OHaECAGzipRPEVhRBAMBhta2dICl+5oEAAADYxeuhEwSxa+7cuaqqqoo8du/e7XRIAAALuelItRWbzwOAw8IzQVJ8/JMMAABgl7ZOEAajI7qys7NVWVnZ7lhlZaXS0tKUnJx8zGv8fr/8fr8d4QEAHOBp3QkkSCeILegEAQCHRTpBfHSCAAAA2IWZILBLfn6+SktL2x175ZVXlJ+f71BEAACnRQajk4fYgiIIADis7rCZIAA6Z9GiRRoyZIgCgYDy8vK0du3aDs9dvHixJk+erL59+6pv374qKCj4wvMBAInB4275dZiZIOiqgwcPqry8XOXl5ZKk7du3q7y8XLt27ZLUspXVjBkzIudff/312rZtm37605/qvffe04MPPqinn35aN998sxPhAwBiQNt2WA4HkiAoggCAw5gJAnTN8uXLVVRUpPnz52v9+vUaO3asCgsLtW/fvmOev3r1al1xxRV69dVXVVZWppycHH3jG9/Qxx9/bHPkAIBY4mn9bZhOEHTVW2+9pfHjx2v8+PGSpKKiIo0fP17z5s2TJO3duzdSEJGkoUOHasWKFXrllVc0duxY3XfffXrkkUdUWFjoSPwAAOexHZa9eNsxADisbSYIRRCgM+6//37Nnj1bs2bNkiSVlJRoxYoVWrJkiW699dajzn/iiSfaff7II4/o2WefVWlpabt3aQIAEkukEyTIzQd0zde+9jWZL7hptXTp0mNes2HDhihGBQDoSdgOy150ggCAw2obGIwOdFZjY6PWrVungoKCyDG3262CggKVlZV16mvU1dWpqalJ/fr16/CchoYGVVdXt3sAAOILM0EAAIBT3HSC2IoiCAA4rK6xZTusXnSCAF/qwIEDCgaDysrKanc8KytLFRUVnfoat9xyiwYOHNiukHKk4uJipaenRx45OTnHFTcAIPaE34HZHGIzbgAAYK/wtpx0gtiDIggAOCzSCeKnEwSItgULFmjZsmV6/vnnFQgEOjxv7ty5qqqqijx2795tY5QAADvQCQIAAJziJg+xFXfcAMBhh5paB6Mn0QkCfJmMjAx5PB5VVla2O15ZWans7OwvvPbee+/VggUL9Le//U2nn376F57r9/vl9/uPO14AQOwKd4KwDQUAALAbg9HtRScIADiMThCg83w+n3Jzc1VaWho5FgqFVFpaqvz8/A6vu+eee3TXXXdp1apVmjBhgh2hAgBinLd1MHqQwegAAMBmDEa3F3fcAMBhzAQBuqaoqEgzZ87UhAkTNHHiRC1cuFC1tbWaNWuWJGnGjBkaNGiQiouLJUm//vWvNW/ePD355JMaMmRIZHZI79691bt3b8d+DgCAs9pmgnDzAQAA2Cs8GJ08xB4UQQDAYXSCAF0zbdo07d+/X/PmzVNFRYXGjRunVatWRYal79q1S253W7PrQw89pMbGRl122WXtvs78+fN1xx132Bk6ACCGeD3sxQ0AAJwRnk0WYjssW3DHDQAcRicI0HVz5szRnDlzjvnc6tWr232+Y8eO6AcEAOhx6AQBAABOYTC6vZgJAgAOq2ts6QRJpggCAABgGy83HwAAgEM8kTzE4UASBEUQAHBYuAjSy0dzHgAAgF3aOkG4+wAAAOzlcbEdlp0oggCAw2rD22H56QQBAACwi7d1fhSdIAAAwG7h2WRNtILYgiIIADisLjwYnU4QAAAA2zATBAAAOCU5qeWNsPVNFEHskJB33H5b+oFWbtzb7pirtQVJklxHnO9yfcnnh11x9HNf/MVcHT/V7jmvx620gFd9U3wantlbp2SnKn9YfwWSeOc40JM1BUNqbK36pzATBAAAwDbhIogxUihkIgNKAQAAoi08F7a+KehwJIkhIYsge6vq9V5FjdNhHLcUn0fnjszU984eqtwT+zodDoBuCM8DkegEAQAAsJPnsKJHc8jIRxEEAADYxO9tKYIcoghii4S843b1OUN0wZgBkc+N2tqfj5xFc2RjtDniBNPhJ+2/7pFfu6vfpzEYUk19s/ZVN2jr/oNat+NT7amq14p39mrFO3s1+aQM3X7BqRqZnSYAPUdd6zyQJI9LPi87FAIAANjFe1jRg7kgAADATuFOkEONFEHskJBFkBGZqRqRme
p0GMfFGKN3PqrSE//cqWfXf6x/fHBAF/52ja6fMlxzvj6CbbKAHqKWeSAAAACOaN8JEpLE71AAAMAebTNBKILYgbcd91Aul0tjc/ronsvG6tX/9zV987RsNYeMfvfqVn3rwTf04f6DTocIoBPCFX/mgQAAANiLThAAAOCUcBGE7bDsQREkDnylf4pKpueq5D/OUP9ePm3eW62pD6zRi2/vcTo0AF+itnU7LIogAAAA9vJQBAEAAA5J9rXclqcTxB4UQeLIN0cP0P/8cLLyh/VXXWNQP3hqg+59eYtCJPRAzArPBOnlZzssAAAAO7lcrkghhCIIAACwE4PR7UURJM5kpgX0p2vydN2UYZKk3726VTc8sU61Dc0ORwbgWNpmgtAJAgAAYLdwEaSZIggAALBReDB6fVOIN7DbgCJIHPK4XZp73qm67ztj5fO49fK7lbqspEwffVbndGgAjhDpBGEwOgAAgO28dIIAAAAHhGeCSFJDc8jBSBIDRZA4dmnuYD117VnK6N0yJ+RbD76hf39c5XRYAA5T1zoYPZlOEAAAANvRCQIAAJwQOKwIwpZY0UcRJM7lnthXf5lzjkZmp2p/TYOm/b5M//hgv9NhAWgVLoLQCQIAAGC/tk4Q3oEJAADs43G75PMyHN0uFEESwKA+yXr6+nzlD+uv2sagZv3hX3pu/UdOhwVAiszrSfHTCQIAAGA3j7vlV2I6QQAAgN3CW2LRCRJ9FEESRFogSUu/d6YuGjtQzSGjoqff1oOrt8oYkn3ASXSCAAAAOCfcCdIc5PciAABgr0BSy635Q40UQaKNIkgC8Xs9WjhtnK796jBJ0j2rtmj+i+8yBBBwUHgwOjNBAAAA7OdhMDoAAHBIuBOE7bCijyJIgnG7Xbrt/FP18wtHyeWSHi/bqRufWMdfNsAhtZFOEIogAAAAdmMwOgAAcEqA7bBsQxEkQV19zlA9cMV4+TxuvfxupWY8ulZVh5qcDgtIOHWRmSBshwUAAGA3L50gAADAIeFdQdgOK/oogiSwC08fqMe+N1Gpfq/W7vhUl5eUqaKq3umwgIRSy0wQAAAAx7R1goQcjgQAACSayHZYzeQh0UYRJMHlD++v5dflKzPVry2VNfr2g69r674ap8MCEka42p/CdlgAAAC2YyYIAABwSng7rHo6QaKOIgg0amCanr1hkoZl9NKeqnpdVlKmdTs/czosICHUtg5GpwgCAABgP6+HmSAAAMAZycwEsQ1FEEiScvql6M83TNLYnD76vK5JVz3ypko3VzodFhD36hpat8NiJggAAIDtPO6WX4mDQYogAADAXgxGtw9FEET06+XTU7Pz9LVTTlB9U0jX/nGdnn5rt9NhAXGNThAAAADnRAajG4ogAADAXsm+llvzDEaPvm4VQRYtWqQhQ4YoEAgoLy9Pa9eu7dR1y5Ytk8vl0iWXXNKdbwsbpPi8Wjxjgi49Y7CCIaOf/vkdLXp1qwy/FACWM8aorpFOEAAAAKcwEwQAADglMhidTpCo63IRZPny5SoqKtL8+fO1fv16jR07VoWFhdq3b98XXrdjxw79+Mc/1uTJk7sdLOyR5HHr3u+cruunDJck/eblLbrjxXf5xQCwWENzKPL3KplOEAAAANuFO0GYCQIAAOxGEcQ+XS6C3H///Zo9e7ZmzZqlUaNGqaSkRCkpKVqyZEmH1wSDQV111VX6xS9+oWHDhh1XwLCHy+XSreeN1LwLR0mSHivbqR88tUENzfylBKyyv6ZBkuTzupVKJwgAAIDt2jpBQg5HAgAAEo2fmSC26VIRpLGxUevWrVNBQUHbF3C7VVBQoLKysg6vu/POO5WZmamrr766+5HCEd87Z6h+e8V4JXlcWrFxr7675F+qrm9yOiwgLuyrqZckZaX55XK5HI4GAAAg8UQ6QRiMDgAAbJYcKYLwZoxo61IR5MCBAwoGg8rKymp3PCsrSxUVFce8Zs2aNXr00Ue1ePHiTn+fhoYGVVdXt3vAOReNHag/fHeievk8Ktv2iab9/k3tq653Oiygx6usbukEyUoNOBwJAABAYvK4W34lZutfAABgt/DW6AxGj75uDUbvrJqaGk2fPl2LFy9WRkZGp68rLi5Wenp65JGTkxPFKNEZ55yUoeXX5Sujt0+b91br2w+9oW37DzodFtCjVVaHO0EoggAAADiBmSAAAMApzASxT5eKIBkZGfJ4PKqsrGx3vLKyUtnZ2Ued/+GHH2rHjh2aOnWqvF6vvF6vHn/8cb344ovyer368MMPj/l95s6dq6qqqshj9+7dXQkTUTJ6ULqevWGSTuyfoo8+O6TLSsr09u7PnQ4L6LHCnSCZaX6HIwEAAEhMHk94JghFEAAAYK8AM0Fs06UiiM/nU25urkpLSyPHQqGQSktLlZ+ff9T5I0eO1MaNG1VeXh55XHTRRTr33HNVXl7eYYeH3+9XWlpauwdiw4n9e+nZGyZpzKB0fVrbqCsWv6n/fX+/02EBPdI+OkEAAAAc5XHRCQIAAJwR3g6LTpDo6/J2WEVFRVq8eLEee+wxbd68WTfccINqa2s1a9YsSdKMGTM0d+5cSVIgENDo0aPbPfr06aPU1FSNHj1aPp/P2p8Gtsjo7ddT156lySdlqK4xqKuX/kvPb/jI6bCAHqfysMHoAAAAsF94O6xgiIGkAADAXgFvy615OkGiz9vVC6ZNm6b9+/dr3rx5qqio0Lhx47Rq1arIsPRdu3bJ7Y7qqBHEgN5+rx6deaZ+/MzbevHtPbp5+dvaX9Oga7863OnQgB6DwegAAADO8jATBAAAOCTSCcJg9KjrchFEkubMmaM5c+Yc87nVq1d/4bVLly7tzrdEDPJ53Vo4bZxOSPXr0TXbdffK97SvukG3nX+q3K2/TADoWHgweibbYQEAADjCG54JEqQIAgAA7JXMTBDb0LKB4+J2u/TzC0fptvNHSpIeWbNdNz9drsZm2smBL1LX2Kya+mZJbIcFAADgFDpBAACAUxiMbh+KILDEtV8drvsvHyuv26W/lO/R1Y/9Swcbmp0OC4hZ+1q3wkrxedTb362mPAAAABwnb+tWzkGKIAAAwGZtg9FDMoZcJJoogsAy3z5jsB6ZOUEpPo/+8cEBXfHwmzpwsMHpsICYFN4KKystIJeL7eMAAACcQCcIAABwSrgTRJIa2FUnqiiCwFJfOyVTT84+S/16+bTx4ypd+tAb2vlJrdNhATGnsqalQJiZylZYAAAATvG2FkFCvPsSAADYLOBtuzV/iOHoUUURBJYbl9NHf74+X4P7JmvnJ3W69KE39O+Pq5wOC4gp+w7rBAEAAIAzIp0gDEYHAAA283rc8nlabs8zFyS6KIIgKoad0FvP3TBJpw5I04GDjfq/D7+p17cecDosIGa0bYdFJwgAAIBTwp0gwRBbUAAAAPsFkiiC2IEiCKImMy2g5dedpfxh/XWwoVnf/cNavfj2HqfDAmJCZetgdDpBAAAAnONpHYzOTBAAAOCE8HB0tsOKLoogiKq0QJKWfu9MXTBmgJqCRj94aoN+/78fyrDnLhJcuBMkkyIIA
ACAY7yecCcIv58AAAD7JbcOR6+nEySqKIIg6vxej357xXh9d9IQSVLx/7ynn73wbzUHaTlH4trXOhg9i8HoAAAAjonMBKEIAgAAHBCIFEG4TxpNFEFgC4/bpTsuOk3zLhwll0t64p+7dM3jb+lgQ7PToQG2M8YcNhOEThAAAACntM0EoQgCAADsFy6CMBMkuiiCwFbfO2eoSv4jV4Ekt1Zv2a/vlJRpb9Uhp8MCbHWwoVl1rXs9ZjIYHQAAwDFuF50gAADAOckUQWxBEQS2KzwtW8uvzVdGb582763WJYte17t7qpwOC7DNpj3VkqSM3n6l+LwORwMAAJC4fN6WX4kPNdKhDgAA7BcejF7PYPSooggCR4zN6aPnbzxbIzJ7q7K6QZeXlOlvmyqdDguwxT8+OCBJOmdEf4cjAQAASGxD+veSJG3bX+twJAAAIBHRCWIPiiBwTE6/FD17wyRNGt5ftY1Bzf7jW1r06lYZQys64ttrH+yXJE0+6QSHIwEAAEhsJ2X1liTt+KRWDc3cfAAAAPbyJ7V2pVIEiSqKIHBUenKSHvveRP3HWV+RMdJvXt6i7z+1QXW0oyNOfVrbqI0ft2z/NvmkDIejAQAASGyZqX6lBbwKGWn7AbpBAACAvcKdIPUUQaKKIggcl+Rx65eXjNGvvjVaXrdLL72zV5c9VKaPPqtzOjTAcq9vPSBjpJHZqcpMCzgdDgAAQEJzuVw6KStVkvRB5UGHowEAAImG7bDsQREEMeOqvBP15Oyz1L+XT5v2VuuC367RK8wJQZz5R2QrLLpAAAAAYsFJmS1bYn2wjyIIAACwF4PR7UERBDFl4tB++sucszU2p4+qDjVp9uNv6ZcvbVJjc8jp0IDjZoyJDEVnHggAAEBsGBEuglTWOBwJAABINAE6QWxBEQQxZ3DfFD1zXb6uPmeoJOmRNdv1nd+XafenbI+Fnu2f2z/V3qp6+bxuTRzaz+lwAAAAILVth0UnCAAAsFnbdli8ATyaKIIgJvm8bv38wlF6eHqu0gJevb37c31z4Wv645s7FQoZp8MDuqyusVm3PPuOJOnb4wdFKv0AAABwVng7rB0HaulABwAAtop0grAdVlRRBEFM+8Zp2Vrxg8k6c0hf1TYG9fMX/q0rH3lTuz6hKwQ9yz2rtmjnJ3UakB7QbRec6nQ4AAAAaDUgPaBePo+aQ0Y7P6l1OhwAAJBA0pOTJEkf7KtRkDd+Rw1FEMS8nH4pWn5tvuZPHaXkJI/e3PapChe+psWvbeOdWugRHi/boaVv7JAk/frS05UWSHI2ICAOLFq0SEOGDFEgEFBeXp7Wrl37hec/88wzGjlypAKBgMaMGaOVK1faFCkAINa5XC6NYEssAADggCmnnKA+KUna+UmdXnpnj9PhxC2v0wEAneF2uzTr7KH6PyOzdMuz76hs2yf61crN+tM/d+r/feMUnT86W15P92t6xhg1BY2CIaOmUEjNQaPmYEjNIaPmYMuxYMioKdj6XPic1mMtz7Ucj3zcer3LJbnkktsluVs+kdvV8rmr9WOXyyWXwh+r9bm2Y253y9dwHXZcarleanlORx3TER+0nXfkOS5X20nGGIWMZGTU+j+FjJFp/dhEPm79f9P6vFo+llqvD3+NL3F47Ec918FTrnbPta6L2tbG1bq2LrlkZBQKHRZ7u5+hLc6W/yYuedwued0uJXnc8nndMsboUFNQ9U1B1TUGFQwZJfs8Sk7yKNnnkc/jVkNzSPVNQdU3hdQcCumEVL8yUwP6tLZRL72zR4+X7ZQkzZ48VF89mYHowPFavny5ioqKVFJSory8PC1cuFCFhYXasmWLMjMzjzr/jTfe0BVXXKHi4mJdeOGFevLJJ3XJJZdo/fr1Gj16tAM/AQAg1pyc2Vtv7/5c71fW6PwxA5wOBwAAJIjefq+uOWeo7v3r+/rd37dq6ukD5XZ3fK8M3eMyxsR8n011dbXS09NVVVWltLQ0p8OBw0Iho2fW7da9f31f+2saJEkD0wP6zoQc5Q3rp1Oz0xRI8ihkjD452KiK6nrt+KRWOz+p1Y5P6vTRZ4d0sL5JdY1B1TY0q64xqGbazRBlPyk8RTd+bXi7ghNwvBL19TEvL09nnnmmfve730mSQqGQcnJy9P3vf1+33nrrUedPmzZNtbW1eumllyLHzjrrLI0bN04lJSWd+p6JutYAkCgefu1D3b3yPY3MTtW8C0dpzOB0SUe8yab1zTc9nZWz6Xh9tA9rDQDxq7q+Secs+Luq65t1x9RRuuD0gUpL/vLehS96Y7HU8ZuLneZ1uyy7P9bZ10c6QdDjuN0uTTvzK7rw9IFa/I9t+mPZTu2pqtd/lX4glVr4fVyS1+2W19PSGeD1uCMdAl5PS8dAUvj51ufCz3vcLiW1niOprTPCmEjXRMi0dSSEwscP66AImcM6MA7rzgi17gAWLtscWccMfxruwmj7/PBzTPtjh51zeFeF+4hf9sKdKpEOFrVc4D6iEyPc8dK+Y+PoGNsdO9Z/hGOe1/Zzte/uOPznaTvmbg0iHIv7GL/Iulwt/z2CIRN5NAVDke3WDu/8cLtcqm8K6lBTUIcag2oMhuT3uhVI8ijg9cjtlvbVNGhfdYP69fJpcN9kzcgfom+Ozj7WTwigixobG7Vu3TrNnTs3csztdqugoEBlZWXHvKasrExFRUXtjhUWFuqFF17o8Ps0NDSooaEh8nl1dfXxBQ4AiGkThvSTyyW9V1GjKx/5p9PhRI3H7dKHd5/vdBgAAOAwaYEkzTp7qP6r9APd8d+bdMd/b3I6pKjadGehUnz2liUogqDH6uX36kcFJ+v6KcO14p29Wv3+fr2141PtraqPnBNIcuuEVL+G9O+lE/unaEj/Xsrpl6L05CT18nmV4vcopXVLo0ghw9NS3KD1DABiz4EDBxQMBpWVldXueFZWlt57771jXlNRUXHM8ysqKjr8PsXFxfrFL35x/AEDAHqEM77SVyu+P1lP/HOn/lK+Rwcbmp0OCQAAJJBrJg/Vlooabfy4SnuqDh3zDcToPoog6PECSR5dmjtYl+YOliQ1NofUGAzJJSnF52H7IQBAl82dO7dd90h1dbVycnIcjAgAEG2jBqbpV98ao7suHq3GYEtHcLtZeDq6CxsAAMAKqYEklUzPldRyb7OhOfiF539ZRhLLKUuyhVtzdhZFEMQdn7dloDUAIP5kZGTI4/GosrKy3fHKykplZx9727ns7OwunS9Jfr9ffr//+AMGAPQ4brdLAbf9v5wDAABI3NuMBlYTAAD0GD6fT7m5uSotbRsCFQqFVFpaqvz8/GNek5+f3+58SXrllVc6PB8AAAAAAMQPOkEAAECPUlRUpJkzZ2rChAmaOHGiFi5cqNraWs2aNUuSNGPGDA0aNEjFxcWSpB/+8IeaMmWK7rvvPl1wwQVatmyZ3nrrLT388MNO/hgAAAAAAMAGFEEAAECPMm3aNO3fv1/z5s1TRUWFxo0bp1WrVkWGn+/atUtud1uz66RJk/Tkk0/qZz/7mW677TaddNJJeuGFFzR69GinfgQAAAAA
AGATl+kBk92qq6uVnp6uqqoqpaWlOR0OAAAxgddH+7DWAAAcjddH+7DWAAAcrbOvj8wEAQAAAAAAAAAAcYkiCAAAAAAAQBcsWrRIQ4YMUSAQUF5entauXdvhuUuXLpXL5Wr3CAQCNkYLAEBiowgCAAAAAADQScuXL1dRUZHmz5+v9evXa+zYsSosLNS+ffs6vCYtLU179+6NPHbu3GljxAAAJDaKIAAAAAAAAJ10//33a/bs2Zo1a5ZGjRqlkpISpaSkaMmSJR1e43K5lJ2dHXlkZWXZGDEAAImNIggAAAAAAEAnNDY2at26dSooKIgcc7vdKigoUFlZWYfXHTx4UCeeeKJycnJ08cUX691337UjXAAAIIogAAAAAAAAnXLgwAEFg8GjOjmysrJUUVFxzGtOOeUULVmyRH/5y1/0pz/9SaFQSJMmTdJHH33U4fdpaGhQdXV1uwcAAOgeiiAAAAAAAABRkp+frxkzZmjcuHGaMmWKnnvuOZ1wwgn6/e9/3+E1xcXFSk9PjzxycnJsjBgAgPhCEQQAAAAAAKATMjIy5PF4VFlZ2e54ZWWlsrOzO/U1kpKSNH78eG3durXDc+bOnauqqqrIY/fu3ccVNwAAiYwiCAAAAAAAQCf4fD7l5uaqtLQ0ciwUCqm0tFT5+fmd+hrBYFAbN27UgAEDOjzH7/crLS2t3QMAAHSP1+kAAAAAAAAAeoqioiLNnDlTEyZM0MSJE7Vw4ULV1tZq1qxZkqQZM2Zo0KBBKi4uliTdeeedOuusszRixAh9/vnn+s1vfqOdO3fqmmuucfLHAAAgYVAEAQAAAAAA6KRp06Zp//79mjdvnioqKjRu3DitWrUqMix9165dcrvbNt747LPPNHv2bFVUVKhv377Kzc3VG2+8oVGjRjn1IwAAkFBcxhjjdBBfprq6Wunp6aqqqqIFFACAVrw+2oe1BgDgaLw+2oe1BgDgaJ19fWQmCAAAAAAAAAAAiEsUQQAAAAAAAAAAQFyiCAIAAAAAAAAAAOISRRAAAAAAAAAAABCXKIIAAAAAAAAAAIC4RBEEAAAAAAAAAADEJa/TAXSGMUaSVF1d7XAkAADEjvDrYvh1EtFDLgIAwNHIRexDLgIAwNE6m4v0iCJITU2NJCknJ8fhSAAAiD01NTVKT093Ooy4Ri4CAEDHyEWij1wEAICOfVku4jI94C0boVBIe/bsUWpqqlwu13F9rerqauXk5Gj37t1KS0uzKMLExXpah7W0DmtpLdbTOlavpTFGNTU1GjhwoNxudriMJnKR2MV6Woe1tA5raS3W0zrkIj2XlbmIxN8rK7GW1mEtrcNaWov1tI5TuUiP6ARxu90aPHiwpV8zLS2NP7QWYj2tw1pah7W0FutpHSvXkndd2oNcJPaxntZhLa3DWlqL9bQOuUjPE41cROLvlZVYS+uwltZhLa3FelrH7lyEt2oAAAAAAAAAAIC4RBEEAAAAAAAAAADEpYQrgvj9fs2fP19+v9/pUOIC62kd1tI6rKW1WE/rsJaQ+HNgNdbTOqyldVhLa7Ge1mEtEcafBeuwltZhLa3DWlqL9bSOU2vZIwajAwAAAAAAAAAAdFXCdYIAAAAAAAAAAIDEQBEEAAAAAAAAAADEJYogAAAAAAAAAAAgLlEEAQAAAAAAAAAAcSkuiyCLFi3SkCFDFAgElJeXp7Vr137h+c8884xGjhypQCCgMWPGaOXKlTZF2jN0ZT0XL16syZMnq2/fvurbt68KCgq+dP0TSVf/bIYtW7ZMLpdLl1xySXQD7EG6upaff/65brrpJg0YMEB+v18nn3wyf9dbdXUtFy5cqFNOOUXJycnKycnRzTffrPr6epuijV2vvfaapk6dqoEDB8rlcumFF1740mtWr16tM844Q36/XyNGjNDSpUujHifsQS5iLXIR65CLWIdcxFrkI9YgH0EYuYi1yEWsQy5iHXIRa5GLWCNmcxETZ5YtW2Z8Pp9ZsmSJeffdd83s2bNNnz59TGVl5THPf/31143H4zH33HOP2bRpk/nZz35mkpKSzMaNG22OPDZ1dT2vvPJKs2jRIrNhwwazefNm893vftekp6ebjz76yObIY09X1zJs+/btZtCgQWby5Mnm4osvtifYGNfVtWxoaDATJkww559/vlmzZo3Zvn27Wb16tSkvL7c58tjT1bV84oknjN/vN0888YTZvn27efnll82AAQPMzTffbHPksWflypXm9ttvN88995yRZJ5//vkvPH/btm0mJSXFFBUVmU2bNpkHHnjAeDwes2rVKnsCRtSQi1iLXMQ65CLWIRexFvmIdchHYAy5iNXIRaxDLmIdchFrkYtYJ1ZzkbgrgkycONHcdNNNkc+DwaAZOHCgKS4uPub5l19+ubngggvaHcvLyzPXXXddVOPsKbq6nkdqbm42qamp5rHHHotWiD1Gd9ayubnZTJo0yTzyyCNm5syZvNi36upaPvTQQ2bYsGGmsbHRrhB7jK6u5U033WS+/vWvtztWVFRkzj777KjG2dN05oX+pz/9qTnttNPaHZs2bZopLCyMYmSwA7mItchFrEMuYh1yEWuRj0QH+UjiIhexFrmIdchFrEMuYi1ykeiIpVwkrrbDamxs1Lp161RQUBA55na7VVBQoLKysmNeU1ZW1u58SSosLOzw/ETSnfU8Ul1dnZqamtSvX79ohdkjdHct77zzTmVmZurqq6+2I8weoTtr+eKLLyo/P1833XSTsrKyNHr0aN19990KBoN2hR2TurOWkyZN0rp16yJtodu2bdPKlSt1/vnn2xJzPOH1Jz6Ri1iLXMQ65CLWIRexFvmIs3gNij/kItYiF7EOuYh1yEWsRS7iLLteg7yWfjWHHThwQMFgUFlZWe2OZ2Vl6b333jvmNRUVFcc8v6KiImpx9hTdWc8j3XLLLRo4cOBRf5gTTXfWcs2aNXr00UdVXl5uQ4Q9R3fWctu2bfr73/+uq666SitXrtTWrVt14403qqmpSfPnz7cj7JjUnbW88sordeDAAZ1zzjkyxqi5uVnXX3+9brvtNjtCjisdvf5UV1fr0KFDSk5OdigyHA9yEWuRi1iHXMQ65CLWIh9xFvlI/CEXsRa5iHXIRaxDLmItchFn2ZWLxFUnCGLLggULtGzZMj3//PMKBAJOh9Oj1NTUaPr06Vq8eLEyMjKcDqfHC4VCyszM1MMPP6zc3FxNmzZNt99+u0pKSpwOrcdZvXq17r77bj344INav369nnvuOa1YsUJ33XWX06EBwFHIRbqPXMRa5CLWIh8B0FOQi3QfuYi1yEWsRS7S88RVJ0hGRoY8Ho8qKyvbHa+srFR2dvYxr8nOzu7S+YmkO+sZdu+992rBggX629/+ptNPPz2aYfYIXV3LDz/8UDt27NDUqVMjx0KhkCTJ6/Vqy5YtGj58eHSDjlHd+XM5YMAAJSUlyePxRI6deuqpqqioUGNjo3w+X1RjjlXdWcuf//znmj59uq655hpJ0pgxY1RbW6trr71Wt99+u9xuauud1dHrT1paGu+67MHIRaxFLmIdchHrkIt
Yi3zEWeQj8YdcxFrkItYhF7EOuYi1yEWcZVcuElf/RXw+n3Jzc1VaWho5FgqFVFpaqvz8/GNek5+f3+58SXrllVc6PD+RdGc9Jemee+7RXXfdpVWrVmnChAl2hBrzurqWI0eO1MaNG1VeXh55XHTRRTr33HNVXl6unJwcO8OPKd35c3n22Wdr69atkYRJkt5//30NGDAgoV/ou7OWdXV1R72Yh5OolplX6Cxef+ITuYi1yEWsQy5iHXIRa5GPOIvXoPhDLmItchHrkItYh1zEWuQizrLtNcjSMesxYNmyZcbv95ulS5eaTZs2mWuvvdb06dPHVFRUGGOMmT59urn11lsj57/++uvG6/Wae++912zevNnMnz/fJCUlmY0bNzr1I8SUrq7nggULjM/nM3/+85/N3r17I4+amhqnfoSY0dW1PNLMmTPNxRdfbFO0sa2ra7lr1y6Tmppq5syZY7Zs2WJeeuklk5mZaX75y1869SPEjK6u5fz5801qaqp56qmnzLZt28xf//pXM3z4cHP55Zc79SPEjJqaGrNhwwazYcMGI8ncf//9ZsOGDWbnzp3GGGNuvfVWM3369Mj527ZtMykpKeYnP/mJ2bx5s1m0aJHxeDxm1apVTv0IsAi5iLXIRaxDLmIdchFrkY9Yh3wExpCLWI1cxDrkItYhF7EWuYh1YjUXibsiiDHGPPDAA+YrX/mK8fl8ZuLEiebNN9+MPDdlyhQzc+bMduc//fTT5uSTTzY+n8+cdtppZsWKFTZHHNu6sp4nnniikXTUY/78+fYHHoO6+mfzcLzYt9fVtXzjjTdMXl6e8fv9ZtiwYeZXv/qVaW5utjnq2NSVtWxqajJ33HGHGT58uAkEAiYnJ8fceOON5rPPPrM/8Bjz6quvHvPfv/D6zZw500yZMuWoa8aNG2d8Pp8ZNmyY+cMf/mB73IgOchFrkYtYh1zEOuQi1iIfsQb5CMLIRaxFLmIdchHrkItYi1zEGrGai7iMoUcHAAAAAAAAAADEn7iaCQIAAAAAAAAAABBGEQQAAAAAAAAAAMQliiAAAAAAAAAAACAuUQQBAAAAAAAAAABxiSIIAAAAAAAAAACISxRBAAAAAAAAAABAXKIIAgAAAAAAAAAA4hJFEAAAAAAAAAAAEJcoggAAAAAAAAAAgLhEEQQAAAAAAAAAAMQliiAAAAAAAAAAACAuUQQBAAAAAAAAAABx6f8DrcPgncA5D/wAAAAASUVORK5CYII=",
"text/plain": [
"
"
]
},
"metadata": {},
"output_type": "display_data"
+ },
+ {
+ "ename": "",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[1;31mThe Kernel crashed while executing code in the the current cell or a previous cell. Please review the code in the cell(s) to identify a possible cause of the failure. Click here for more info. View Jupyter log for further details."
+ ]
}
],
"source": [
@@ -215,7 +380,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.6 (main, Nov 14 2022, 16:10:14) [GCC 11.3.0]"
+ "version": "3.9.16"
},
"orig_nbformat": 4,
"vscode": {
diff --git a/MindFlow/applications/cfd/lax/numeric.yaml b/MindFlow/applications/cfd/lax/numeric.yaml
index 930cf2ca4ee602cc63250151216bf5b9e10c8b87..094fe642af1f300a1e6ed2eecf0e8866859b3a1a 100644
--- a/MindFlow/applications/cfd/lax/numeric.yaml
+++ b/MindFlow/applications/cfd/lax/numeric.yaml
@@ -3,7 +3,7 @@ mesh:
nx: 200
gamma: 1.4
x_range: [0, 1]
- pad_size: 3
+ pad_size: 4
material:
type: "IdealGas"
@@ -24,7 +24,7 @@ space_solver:
is_convective_flux: True
convective_flux:
reconstructor: 'WENO5'
- riemann_computer: 'Rusanov'
+ riemann_computer: 'Roe'
is_viscous_flux: False
diff --git a/MindFlow/applications/cfd/lax/solve_lax.py b/MindFlow/applications/cfd/lax/solve_lax.py
index 06abd7fb59c4439de62c31b470399fa5b20d6b46..422a17a1a4972b594d004c5ec28c3ee61dbd15d6 100644
--- a/MindFlow/applications/cfd/lax/solve_lax.py
+++ b/MindFlow/applications/cfd/lax/solve_lax.py
@@ -13,20 +13,60 @@
# limitations under the License.
# ==============================================================================
"""solve lax tube flow"""
+import argparse
+
from mindspore import context
-from mindflow import load_yaml_config, vis_1d
-from mindflow import cfd
+from src.ic import lax_ic_1d
+
+from mindflow import cfd, load_yaml_config, vis_1d
from mindflow.cfd.runtime import RunTime
from mindflow.cfd.simulator import Simulator
-from src.ic import lax_ic_1d
+parser = argparse.ArgumentParser(description="Lax compute")
+parser.add_argument(
+ "--mode",
+ type=str,
+ default="GRAPH",
+ choices=["GRAPH", "PYNATIVE"],
+ help="Running in GRAPH_MODE OR PYNATIVE_MODE",
+)
+parser.add_argument(
+ "--save_graphs",
+ type=bool,
+ default=False,
+ choices=[True, False],
+ help="Whether to save intermediate compilation graphs",
+)
+parser.add_argument("--save_graphs_path", type=str, default="./graphs")
+parser.add_argument(
+ "--device_target",
+ type=str,
+ default="GPU",
+ choices=["GPU", "Ascend"],
+ help="The target device to run, support 'Ascend', 'GPU'",
+)
+parser.add_argument("--device_id", type=int, default=0, help="ID of the target device")
+parser.add_argument("--config_file_path", type=str, default="./numeric.yaml")
+parser.add_argument("--reconstructor", type=str, choices=["WENO3", "WENO5", "WENO7"], default="WENO5")
+parser.add_argument("--riemann_computer", type=str, choices=["HLLC", "Roe", "Rusanov"], default="Roe")
+
+args = parser.parse_args()
-context.set_context(device_target="GPU", device_id=3)
+context.set_context(
+ mode=context.GRAPH_MODE if args.mode.upper().startswith("GRAPH") else context.PYNATIVE_MODE,
+ save_graphs=args.save_graphs,
+ save_graphs_path=args.save_graphs_path,
+ device_target=args.device_target,
+ device_id=args.device_id,
+)
+print(f"Running in {args.mode.upper()} mode, using device id: {args.device_id}.")
-config = load_yaml_config('numeric.yaml')
+config = load_yaml_config(args.config_file_path)
+config["space_solver"]["convective_flux"]["reconstructor"] = args.reconstructor
+config["space_solver"]["convective_flux"]["riemann_computer"] = args.riemann_computer
simulator = Simulator(config)
-runtime = RunTime(config['runtime'], simulator.mesh_info, simulator.material)
+runtime = RunTime(config["runtime"], simulator.mesh_info, simulator.material)
mesh_x, _, _ = simulator.mesh_info.mesh_xyz()
pri_var = lax_ic_1d(mesh_x)
@@ -39,4 +79,4 @@ while runtime.time_loop(pri_var):
runtime.advance()
pri_var = cfd.cal_pri_var(con_var, simulator.material)
-vis_1d(pri_var, 'lax.jpg')
+vis_1d(pri_var, "lax.jpg")
diff --git a/MindFlow/applications/cfd/sod/README.md b/MindFlow/applications/cfd/sod/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..82b097de32685eb7dddeb322660f7fa9548834e1
--- /dev/null
+++ b/MindFlow/applications/cfd/sod/README.md
@@ -0,0 +1,48 @@
+ENGLISH | [简体中文](README_CN.md)
+
+# Sod Tube Problem
+
+## Overview
+
+The Sod shock tube problem, named after Gary A. Sod, is a common test for the accuracy of computational fluid codes, such as Riemann solvers. In this case, the MindFlow fluid simulation suite is used to solve the Sod shock tube problem.
+
+## QuickStart
+
+### Run Option 1: Call `solve_sod.py` from command line
+
+```shell
+python solve_sod.py --mode GRAPH --save_graphs_path ./graphs --device_target GPU --device_id 0 --config_file_path ./numeric.yaml --reconstructor WENO5 --riemann_computer Roe
+```
+
+where:
+
+`--mode` is the running mode. 'GRAPH' indicates static graph mode and 'PYNATIVE' indicates dynamic graph mode. You can refer to the [MindSpore official website](https://www.mindspore.cn/docs/en/r2.0/design/dynamic_graph_and_static_graph.html) for details. Default 'GRAPH'.
+
+`--save_graphs` indicates whether to save the computational graph. Default 'False'.
+
+`--save_graphs_path` indicates the path to save the computational graph. Default './graphs'.
+
+`--device_target` indicates the computing platform. You can choose 'Ascend' or 'GPU'. Default 'GPU'.
+
+`--device_id` indicates the index of the NPU or GPU. Default 0.
+
+`--config_file_path` indicates the path of the parameter file. Default './numeric.yaml'.
+
+`--reconstructor` indicates the reconstruction scheme. You can choose 'WENO3', 'WENO5' or 'WENO7'. Default 'WENO5'.
+
+`--riemann_computer` indicates the Riemann computer. You can choose 'HLLC', 'Roe' or 'Rusanov'. Default 'Roe'.
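+
+As an alternative to the CLI flags above, the same numerical options can be overridden from Python before the simulator is built. The sketch below is for illustration only: it mirrors the calls used in `solve_sod.py` and omits the time-marching loop.
+
+```python
+# Illustrative sketch (not part of the shipped scripts): load the case
+# configuration, override the numerical methods, and build the solver objects.
+from mindspore import context
+from mindflow import load_yaml_config
+from mindflow.cfd.runtime import RunTime
+from mindflow.cfd.simulator import Simulator
+
+context.set_context(mode=context.GRAPH_MODE, device_target="GPU", device_id=0)
+
+config = load_yaml_config("./numeric.yaml")
+config["space_solver"]["convective_flux"]["reconstructor"] = "WENO5"   # WENO3 / WENO5 / WENO7
+config["space_solver"]["convective_flux"]["riemann_computer"] = "Roe"  # HLLC / Roe / Rusanov
+
+simulator = Simulator(config)
+runtime = RunTime(config["runtime"], simulator.mesh_info, simulator.material)
+```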
+
+### Run Option 2: Run Jupyter Notebook
+
+You can use [Chinese](./sod_tube_CN.ipynb) or [English](./sod_tube.ipynb) Jupyter Notebook to run the training and evaluation code line-by-line.
+
+## Results
+
+The following two figures show the results of the Sod shock tube problem computed with different reconstruction schemes and Riemann computers. The first figure compares the reconstruction schemes with the Roe Riemann computer fixed, while the second compares the Riemann computers with the reconstruction scheme fixed to WENO5. The label "exact" denotes the exact solution used as a reference. Note that both figures show some oscillations in the computed results: the solver reconstructs the conservative variables in physical space, and high-order reconstruction in physical space can introduce oscillations. Consistent with this, WENO7 exhibits more pronounced oscillations than WENO3.
+
+![reconstructor](images/reconstructor.png)
+
+![riemann_computer](images/riemann_computer.png)
+
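+The data behind the two comparison figures can be regenerated by sweeping the CLI flags of `solve_sod.py`. The snippet below is a hypothetical helper (not shipped with this case) that runs the flag combinations used above.
+
+```python
+# Hypothetical helper: rerun solve_sod.py with the flag combinations behind
+# the two comparison figures. Each call solves the case on the mesh from numeric.yaml.
+import subprocess
+
+# First figure: vary the reconstruction scheme with the Roe Riemann computer.
+for rec in ["WENO3", "WENO5", "WENO7"]:
+    subprocess.run(["python", "solve_sod.py", "--reconstructor", rec,
+                    "--riemann_computer", "Roe"], check=True)
+
+# Second figure: vary the Riemann computer with WENO5 reconstruction.
+for rc in ["Rusanov", "HLLC", "Roe"]:
+    subprocess.run(["python", "solve_sod.py", "--reconstructor", "WENO5",
+                    "--riemann_computer", rc], check=True)
+```
+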
+## Contributor
+
+huxin2023
diff --git a/MindFlow/applications/cfd/sod/README_CN.md b/MindFlow/applications/cfd/sod/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..a5d1db43bcd6b8ba9cae3423698aec8cd74cc0a6
--- /dev/null
+++ b/MindFlow/applications/cfd/sod/README_CN.md
@@ -0,0 +1,47 @@
+[ENGLISH](README.md) | 简体中文
+
+# Sod Shock Tube Problem
+
+## Overview
+
+The Sod shock tube problem, named after Gary A. Sod, is a common test for the accuracy of computational fluid codes, such as Riemann solvers. This case uses the MindFlow fluid simulation suite to solve the Sod shock tube problem.
+
+## QuickStart
+
+### Run Option 1: Call `solve_sod.py` from the command line
+
+```shell
+python solve_sod.py --mode GRAPH --save_graphs_path ./graphs --device_target GPU --device_id 0 --config_file_path ./numeric.yaml --reconstructor WENO5 --riemann_computer Roe
+```
+
+where:
+`--mode` is the running mode. 'GRAPH' indicates static graph mode and 'PYNATIVE' indicates dynamic graph mode; see the [MindSpore official website](https://www.mindspore.cn/docs/zh-CN/r2.0/design/dynamic_graph_and_static_graph.html) for details. Default 'GRAPH'.
+
+`--save_graphs` indicates whether to save the computational graph. Default 'False'.
+
+`--save_graphs_path` indicates the path to save the computational graph. Default './graphs'.
+
+`--device_target` indicates the computing platform. You can choose 'Ascend' or 'GPU'. Default 'GPU'.
+
+`--device_id` indicates the index of the computing card, set according to the actual environment. Default 0.
+
+`--config_file_path` indicates the path of the configuration file. Default './numeric.yaml'.
+
+`--reconstructor` indicates the reconstruction scheme. You can choose 'WENO3', 'WENO5' or 'WENO7'. Default 'WENO5'.
+
+`--riemann_computer` indicates the Riemann solver. You can choose 'HLLC', 'Roe' or 'Rusanov'. Default 'Roe'.
+
+### Run Option 2: Run Jupyter Notebook
+
+You can use the [Chinese](./sod_tube_CN.ipynb) or [English](./sod_tube.ipynb) Jupyter Notebook to run the training and validation code line by line.
+
+## Results
+
+The following two figures show the results of the Sod shock tube problem computed with different reconstruction schemes and Riemann solvers. The first figure compares the reconstruction schemes with the Roe Riemann solver fixed, while the second compares the Riemann solvers with the reconstruction scheme fixed to WENO5. The label "exact" denotes the exact solution used as a reference. Note that both figures show some oscillations in the computed results: the program reconstructs the conservative variables in physical space, and high-order reconstruction in physical space can introduce oscillations. Consistent with this, WENO7 exhibits more pronounced oscillations than WENO3.
+
+![reconstructor](images/reconstructor.png)
+
+![riemann_computer](images/riemann_computer.png)
+
+## Contributor
+
+huxin2023
diff --git a/MindFlow/applications/cfd/sod/images/reconstructor.png b/MindFlow/applications/cfd/sod/images/reconstructor.png
new file mode 100644
index 0000000000000000000000000000000000000000..f0dbe4dd92740e7ac25db8eeebcbbb669cba2755
Binary files /dev/null and b/MindFlow/applications/cfd/sod/images/reconstructor.png differ
diff --git a/MindFlow/applications/cfd/sod/images/riemann_computer.png b/MindFlow/applications/cfd/sod/images/riemann_computer.png
new file mode 100644
index 0000000000000000000000000000000000000000..b25ff3c3f8c1c067b816c62c076d0c58e490de8b
Binary files /dev/null and b/MindFlow/applications/cfd/sod/images/riemann_computer.png differ
diff --git a/MindFlow/applications/cfd/sod/numeric.yaml b/MindFlow/applications/cfd/sod/numeric.yaml
index 342dc5628618937054855c851bbee20d2525d868..4b6c519eca188be52f4ae5ea3ddc2bfb45d4cecd 100644
--- a/MindFlow/applications/cfd/sod/numeric.yaml
+++ b/MindFlow/applications/cfd/sod/numeric.yaml
@@ -3,7 +3,7 @@ mesh:
nx: 100
gamma: 1.4
x_range: [0, 1]
- pad_size: 3
+ pad_size: 4
material:
type: "IdealGas"
@@ -24,7 +24,7 @@ space_solver:
is_convective_flux: True
convective_flux:
reconstructor: 'WENO5'
- riemann_computer: 'Rusanov'
+ riemann_computer: 'Roe'
is_viscous_flux: False
diff --git a/MindFlow/applications/cfd/sod/sod_tube.ipynb b/MindFlow/applications/cfd/sod/sod_tube.ipynb
index 30bb086c6e6c86d1ff40e44f0d986478c9aaff7e..83d3f1c52338ca16ea72e080301cc20aafa25ee8 100644
--- a/MindFlow/applications/cfd/sod/sod_tube.ipynb
+++ b/MindFlow/applications/cfd/sod/sod_tube.ipynb
@@ -55,9 +55,7 @@
"from mindflow.cfd.runtime import RunTime\n",
"from mindflow.cfd.simulator import Simulator\n",
"\n",
- "from src.ic import sod_ic_1d\n",
- "\n",
- "context.set_context(device_target=\"GPU\", device_id=3)"
+ "from src.ic import sod_ic_1d"
]
},
{
@@ -65,9 +63,17 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## Define Simulator and RunTime\n",
+ "## Setting up the MindSpore Runtime Environment\n",
+ "\n",
+ "Before running the program, the context should be configured. The commonly used parameters inside `context.set_context` are described as follows:\n",
+ "\n",
+ "`mode` represents the execution mode. 'GRAPH' indicates the static graph mode, 'PYNATIVE' indicates the dynamic graph mode. For more details, please refer to the [MindSpore official website](https://www.mindspore.cn/docs/en/r2.0/design/dynamic_graph_and_static_graph.html?highlight=pynative). The default value is 'GRAPH'.\n",
"\n",
- "The mesh, material, runtime, boundary conditions and numerical methods are defined in [numeric.yaml](https://gitee.com/mindspore/mindscience/blob/master/MindFlow/applications/cfd/sod/numeric.yaml)."
+ "`save_graphs` indicates whether to save the computation graph. The default value is 'False'.\n",
+ "\n",
+ "`device_target` represents the type of computing platform to be used, which can be either 'Ascend' or 'GPU'. The default value is 'GPU'.\n",
+ "\n",
+ "`device_id` represents the number of the computing card to be used. It can be filled in according to the actual situation. The default value is 0."
]
},
{
@@ -76,7 +82,44 @@
"metadata": {},
"outputs": [],
"source": [
- "config = load_yaml_config('numeric.yaml')\n",
+ "context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target=\"GPU\", device_id=0)"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Reading Configuration File\n",
+ "\n",
+    "This program provides multiple options for configuring the grid, materials, simulation time, boundary conditions, and numerical methods. These configurations can be set in the file named [numeric.yaml](./numeric.yaml). Users can choose different numerical methods according to their needs. The program supports the following numerical methods: WENO3, WENO5, and WENO7 for reconstruction, and Rusanov, HLLC, and Roe for Riemann solvers.\n",
+ "\n",
+ "In addition to directly setting the configurations in the file, you can also modify the following code to select the desired numerical methods. In the code block below, the second and third lines are where the numerical methods are set. If you prefer to specify the numerical methods directly in the configuration file, you can comment out these two lines of code."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+    "config = load_yaml_config('numeric.yaml')\n",
+    "config['space_solver']['convective_flux']['reconstructor'] = 'WENO5'\n",
+    "config['space_solver']['convective_flux']['riemann_computer'] = 'Roe'"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Define Simulator and RunTime"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
"simulator = Simulator(config)\n",
"runtime = RunTime(config['runtime'], simulator.mesh_info, simulator.material)"
]
@@ -93,7 +136,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -114,7 +157,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 6,
"metadata": {},
"outputs": [
{
@@ -122,40 +165,53 @@
"output_type": "stream",
"text": [
"current time = 0.000000, time step = 0.007606\n",
- "current time = 0.007606, time step = 0.005488\n",
- "current time = 0.013094, time step = 0.004744\n",
- "current time = 0.017838, time step = 0.004501\n",
- "current time = 0.022339, time step = 0.004338\n",
- "current time = 0.026678, time step = 0.004293\n",
- "current time = 0.030971, time step = 0.004268\n",
- "current time = 0.035239, time step = 0.004198\n",
- "current time = 0.039436, time step = 0.004157\n",
- "current time = 0.043593, time step = 0.004150\n",
- "current time = 0.047742, time step = 0.004075\n",
- "current time = 0.051818, time step = 0.004087\n",
- "current time = 0.055905, time step = 0.004056\n",
- "current time = 0.059962, time step = 0.004031\n",
- "current time = 0.063993, time step = 0.004021\n",
- "current time = 0.068014, time step = 0.004048\n",
- "current time = 0.072062, time step = 0.004039\n",
- "current time = 0.076101, time step = 0.004016\n",
- "current time = 0.080117, time step = 0.004049\n",
- "current time = 0.084166, time step = 0.004053\n",
- "current time = 0.088218, time step = 0.004045\n",
- "current time = 0.092264, time step = 0.004053\n",
- "current time = 0.096317, time step = 0.004062\n",
- "current time = 0.100378, time step = 0.004065\n",
- "current time = 0.104443, time step = 0.004068\n",
- "current time = 0.108511, time step = 0.004072\n",
- "current time = 0.112583, time step = 0.004075\n",
- "current time = 0.116658, time step = 0.004077\n",
- "current time = 0.120735, time step = 0.004080\n",
- "current time = 0.124815, time step = 0.004081\n",
- "...\n",
- "current time = 0.186054, time step = 0.004084\n",
- "current time = 0.190138, time step = 0.004084\n",
- "current time = 0.194222, time step = 0.004084\n",
- "current time = 0.198306, time step = 0.004085\n"
+ "current time = 0.007606, time step = 0.004957\n",
+ "current time = 0.012564, time step = 0.004426\n",
+ "current time = 0.016990, time step = 0.004285\n",
+ "current time = 0.021274, time step = 0.004200\n",
+ "current time = 0.025474, time step = 0.004197\n",
+ "current time = 0.029671, time step = 0.004117\n",
+ "current time = 0.033787, time step = 0.004086\n",
+ "current time = 0.037874, time step = 0.004124\n",
+ "current time = 0.041998, time step = 0.004134\n",
+ "current time = 0.046131, time step = 0.004123\n",
+ "current time = 0.050254, time step = 0.004096\n",
+ "current time = 0.054350, time step = 0.004096\n",
+ "current time = 0.058445, time step = 0.004082\n",
+ "current time = 0.062528, time step = 0.004082\n",
+ "current time = 0.066610, time step = 0.004079\n",
+ "current time = 0.070689, time step = 0.004071\n",
+ "current time = 0.074761, time step = 0.004079\n",
+ "current time = 0.078840, time step = 0.004079\n",
+ "current time = 0.082919, time step = 0.004084\n",
+ "current time = 0.087003, time step = 0.004088\n",
+ "current time = 0.091090, time step = 0.004094\n",
+ "current time = 0.095184, time step = 0.004100\n",
+ "current time = 0.099284, time step = 0.004103\n",
+ "current time = 0.103388, time step = 0.004096\n",
+ "current time = 0.107484, time step = 0.004105\n",
+ "current time = 0.111589, time step = 0.004106\n",
+ "current time = 0.115696, time step = 0.004097\n",
+ "current time = 0.119793, time step = 0.004090\n",
+ "current time = 0.123882, time step = 0.004087\n",
+ "current time = 0.127969, time step = 0.004080\n",
+ "current time = 0.132049, time step = 0.004078\n",
+ "current time = 0.136127, time step = 0.004072\n",
+ "current time = 0.140199, time step = 0.004074\n",
+ "current time = 0.144273, time step = 0.004074\n",
+ "current time = 0.148347, time step = 0.004077\n",
+ "current time = 0.152423, time step = 0.004077\n",
+ "current time = 0.156501, time step = 0.004077\n",
+ "current time = 0.160578, time step = 0.004083\n",
+ "current time = 0.164661, time step = 0.004085\n",
+ "current time = 0.168746, time step = 0.004088\n",
+ "current time = 0.172834, time step = 0.004091\n",
+ "current time = 0.176924, time step = 0.004091\n",
+ "current time = 0.181015, time step = 0.004092\n",
+ "current time = 0.185107, time step = 0.004090\n",
+ "current time = 0.189198, time step = 0.004088\n",
+ "current time = 0.193285, time step = 0.004090\n",
+ "current time = 0.197375, time step = 0.004090\n"
]
}
],
@@ -179,12 +235,12 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
-    "image/png": "<base64-encoded PNG plot output omitted>",
kIkrRgcnndETPYYX4GhgAUlnsJNavCUEAjAMhCIC4DTfNVr7HqYZzvdre0GZ6HAAAMIr90XNYy6rpAwEmizt2DivMOSwASGWxEOSNk13qGQwYngZAuiAEARA3Jd+jP6ufJUn67otsgwAAkGq6B/06c35IkrSMTRBg0rgoRgeAtDC7LF8LKwoUCtv63WG2QQCMDSEIgIvcd8tc5bgd2tvUrd8faTc9DgAAuEDsFNas0jwV57oNTwNkjpFidEIQAEh165dVSpKe30efKYCxIQQBcJGyAq8+vSayDfJ3/35IIV4NBwBAyoidwqIPBJhc8WL0EOewACDV3bG8SpL0+yPt6vcFDU8DIB0QggC4xH95/3wV57p1qKVP//p2k+lxAABA1P5m+kCARIhvgvACIABIeYsrCzWnLF/+YFjbGziJBeDqCEEAXKIkz6O/+sACSdK3f3OEV1YAAJAiYiEImyDA5HLTCQIAacOyLN2xPHIS6985iQVgDAhBAIzqnhtnaXZpntr7fPpfvz9uehwAALJe73BApzoHJUm11YQgwGRyRTdBApzDAoC0EDuJ9eLhNg3wwk0AV0EIAmBUHpdDm+9YIkn6/ksndLZ7yPBEAABkt4NnI6Xo04tzNDXfY3gaILO4HJGnxvThAUB6WFpVpFmlefIFw3rxcJvpcQCkOEIQAJd1+9IK3TBnqnzBsP7hV4dNjwMAQFaLn8JiCwSYdK54MTohCACkg8hJrMg2CCexAFwNIQiAy7IsSw99ZKkk6efvNGtvU7fZgQAAyGKxEGQ5IQgw6VzxYnTOYQFAurijNhKC/O5Qm4b8IcPTAEhlhCAArmj5jGL90XXVkqT/+q97ubUJAIAh+9gEARLGFS1GD9tSmJNYAJAWaquLVDM1V0OBkHZwEgvAFRCCALiqv71jicoLvTrW1q/NP9sn2+aJIQAAyTTgC+pEx4AkQhAgEWLnsCQpwDYIAKQFy7Li2yDP7TtneBoAqYwQBMBVlRV49T/vvk5Oh6Vn957Vv7zRaHokAACySsO5Xtm2VFHk1bRCr+lxgIwTO4clUY4OAOkk1gvyu0NtGg5wEgvA6AhBAIzJDXOm6ksfWixJ+vq/HaQfBACAJIqfwprOFgiQCC7HyFPjAOXoAJA2VswoVnVJrgb9Ie043G56HAApihAEwJh99pY5Wr+sQv5QWH/5o906P+A3PRIAAFlhf3OvJE5hAYnCJggApCfLsnTH8kpJ0i/fPWt4GgCpihAEwJhZlqV/+NM6zSrNU3P3kP7jD99SU9eg6bEAAMh4+ylFBxLK4bAUy0GCITpBACCd3Fk3XZL024ZW9fuChqcBkIoIQQCMS1GOW//46dUqzHFpb1O37vj/vax/28urLQAASJQhf0hH2/okScsJQYCEcTkjT48DbIIAQFpZXl2suWX5Gg6E9esDLabHAZCCCEEAjNvS6UV6/q9u0aqZJeobDuq//J939DdP79Wgn1dcAAAw2Q619CpsS2UFHlUUUYoOJErsJFaIThAASCuWZeljKyPbIL/Yw4s0AVyKEATAhNRMzdNP/3O9Nt42X5Yl/fTtM7rlv7+ov/35Pr16rIMzAgAATJILT2FZlnWVjwYwUbEQJBDm61gASDefWFktSXrlWIc6+n2GpwGQalymBwCQvtxOh76wfpFuXlCm//rTvWruHtKP32jUj99o1NR8j25ZUKaZU/M0vSRX1SW5KivwypatcFgKhsMKhGz1DQfUOxxQz2BAvcNBBUKR94fCYQXDtvI8TpUVeFVa4FVZvkdVJbmaOTVPTgffBAIAZId4Kfp0TmEBiRQ7h0UxOgCkn9ll+aqrKdHepm79cu9Z/ce1c0yPBCCFEIIAuGY3zi3Vjv/7Vu083ql/339OL+xvUdeAP2FrqB6XQ/OmFWhBeYEWVhRo3rQCzSsv0KzSPHldzoT8mgAAmLIvvglSZHgSILPFN0HYaAaAtPTxuuna29StZ/YQggC4GCEIgEnhdjr03oXT9N6F0/T1j9fqjZNd2numW2e7h9R8fkjN3UPqGvDLYVlyOSw5nZZcDocKc1wqznWrKMetwhyXctxOOR3Rj3FYGvAF1THgV0efTx39PjV3D2k4EFbDuV41nOu9aAaHJc2YkqfSAo9Kct2akudRUa5bTocl25Zs2bLt2MdasqzIY4pz3ZpVmq/ZpfmaVZanohy3gd9BAAAu5QuGdKQ1UopeSyk6kFDu6CZIkE4QAEhLH62r0jeeO6g9Td063TmgWaX5pkcCkCIIQQBMOpfTobXzy7R2ftmk/9yhsK3m80M60tqno239OtrWp+PtAzrR1q8+X1CNXYNq7Bq8pl+juiRXn75xpu6+YaZK8jyTNDkAAON3pKVfwbCtkjy3qktyTY8DZLTYudUg57AAIC2VF+Zo7fwyvXy0Q7/Yc1Z/9YEFpkcCkCIIQQCkFafD0szSPM0szdO6pRXx99u2rfY+n051Dur8oF/dg36dHwyoZygg25YsS7IU+THy8VLYjjyuc8Cv050DOtU5qPa+yLbJ379wWN/dfkx/snqGNqydrbnTCsx8wgCArBY7hbWcUnQg4VzOaAjCOSyMweOPP65/+Id/UEtLi+rq6vTd735XN9xww1Uf95Of/ESf+tSn9PGPf1zPPPNM4gcFsszHV1br5aMdemZPs/7L++fz9RMASYQgADKEZVkqL8pReVHONf08/b6gXtjfoh+8clIN53r1/75+Wj9647S++cnl+tQNMydpWgAAxmb/2UgIsoxSdCDhYp0gFKPjap566ilt2rRJW7du1Zo1a/Sd73xH69ev1+HDh1VeXn7Zx506dUpf+MIXdMsttyRxWiC7rF9WoS//3KET7QPa39yr5TP4GgqA5DA9AACkkgKvS3+yeoae/6ub9ePPrtH7Fk5T2Jb+9uf79PSuM6bHAwBkmQMXbIIASCyXI/L0OEAIgqt47LHHdN9992nDhg1aunSptm7dqry8PD3xxBOXfUwoFNKnP/1pffWrX9XcuXOTOC2QXQpz3Fq3JHI14hd7mg1PAyBVEIIAwCgsy9JN88v0ww3v0Z/Xz5JtS//303v5IgoAkDSBUFgNLbFS9CLD0wCZz805LIyB3+/Xrl27tG7duvj7HA6H1q1bp507d172cV/72tdUXl6uz3zmM2P6dXw+n3p7ey96AzA2H185XZL0zJ6zCvBnOgARggDAFVmWpf/2sWX61A0zZdvSpp/u1XPvnjM9FgAgCxxt7Zc/GFZhjkszp+aZHgfIeBSjYyw6OjoUCoVUUVFx0fsrKirU0tIy6mNeeeUV/eAHP9C2bdvG/Ots2bJFxcXF8beampprmhvIJrcuKldpvkcd/T79/nC76XEApABCEAC4Csuy9M1P1OpPVs9QKGzrwZ+8o5eO8IUUACCx9jfH+kCKKPUEksDljDw9DoYIQTB5+vr6dM8992jbtm0qKysb8+M2b96snp6e+FtTU1MCpwQyi8fl0CdXVUuSfvo2/9sBQDE6AIyJw2Hpv//xCgVCYf1iz1l947mDemH+e+Vw8E0pAEBixErR6QMBksMV3wThdA
our6ysTE6nU62trRe9v7W1VZWVlZd8/PHjx3Xq1Cndeeed8feFo/8dc7lcOnz4sObNm3fJ47xer7xe7yRPD2SPP72+Rv/0ykn97lCbOvp9Kivgf09ANmMTBADGyOmw9LWP16rA69KR1n79tqH16g8CAGCC9kU3QWoJQYCkYBMEY+HxeLR69Wpt3749/r5wOKzt27ervr7+ko9fvHix9u3bpz179sTfPvaxj+m2227Tnj17OHMFJMiiykLVzShWMGzrmXfo9gSyHSEIAIxDca5b99TPkiQ9vuO4bJsnyQCAyRcMhdVwLlKCyyYIkBxuNkEwRps2bdK2bdv05JNPqqGhQffff78GBga0YcMGSdK9996rzZs3S5JycnJUW1t70VtJSYkKCwtVW1srj8dj8lMBMtqfXh8JGX/6dhPP3YEsRwgCAOP0n9bOkdfl0N6mbr12vNP0OACADHS8fUDDgbAKvC7NLs03PQ6QFShGx1jddddd+ta3vqWHH35YK1eu1J49e/TCCy/Ey9IbGxt17tw5w1MCuLNuurwuh4609uvdMz2mxwFg0IRCkMcff1yzZ89WTk6O1qxZozfffHNMj/vJT34iy7L0iU98YiK/LACkhGmFXn3qhpmSpMdfPGZ4GgBAJoqdwlo6vYj+KSBJ3JzDwjhs3LhRp0+fls/n0xtvvKE1a9bE/92OHTv0wx/+8LKP/eEPf6hnnnkm8UMCWa44160P1Ua6eihIB7LbuEOQp556Sps2bdIjjzyi3bt3q66uTuvXr1dbW9sVH3fq1Cl94Qtf0C233DLhYQEgVdz33rlyOSy9drxTuxvPmx4HAJBh9jdTig4kG5sgAJB5/kP0JNaze89qOBAyPA0AU8Ydgjz22GO67777tGHDBi1dulRbt25VXl6ennjiics+JhQK6dOf/rS++tWvau7cudc0MACkguqSXH1yVbUk6XtsgwAAJtn+eCl6keFJgOzhckZDkBCdIACQKernlqq6JFd9w0H96kCL6XEAGDKuEMTv92vXrl1at27dyE/gcGjdunXauXPnZR/3ta99TeXl5frMZz4zpl/H5/Opt7f3ojcASDWfu3WeLEv6bUNbvLwWAIBrFQrbOnCWUnQg2dyO6DksNkEAIGM4HJb+9PoZkjiJBWSzcYUgHR0dCoVC8bKvmIqKCrW0jJ6mvvLKK/rBD36gbdu2jfnX2bJli4qLi+NvNTU14xkTAJJi3rQC3VFbJUn6/ksnDE8DAMgUJ9r7NRQIKc/j1JyyAtPjAFnDGd8EIQQBgEzyx9fNkGVJrx7r1KmOAdPjADBgQsXoY9XX16d77rlH27ZtU1lZ2Zgft3nzZvX09MTfmppIagGkpv908xxJ0m8PtirA6QQAwCTYfzZail5VFO8oAJB47ngnCF/TAUAmqZmap/cumCZJ+vGbjYanAWCCazwfXFZWJqfTqdbW1ove39raqsrKyks+/vjx4zp16pTuvPPO+PvC0S8oXS6XDh8+rHnz5l3yOK/XK6/XO57RAMCIVTUlmprvUdeAX7tOn9eNc0tNjwQASHP7zkROYdVyCgtIKpeTc1gAkKn+7MZZ+v2Rdv3r203a9MGFynE7TY8EIInGtQni8Xi0evVqbd++Pf6+cDis7du3q76+/pKPX7x4sfbt26c9e/bE3z72sY/ptttu0549ezhzBSDtORyW3rcw8oqSHYfbDU8DAMgEI6XohCBAMrkcFKMDQKZ6/+JyTS/O0fnBgP59/znT4wBIsnGfw9q0aZO2bdumJ598Ug0NDbr//vs1MDCgDRs2SJLuvfdebd68WZKUk5Oj2trai95KSkpUWFio2tpaeTyeyf1sAMCAWxfFQpA2w5MAANJdOGzrQPQcFqXoQHK5op0gATpBACDjOB2WPnXDTEnSv7zOSSwg24zrHJYk3XXXXWpvb9fDDz+slpYWrVy5Ui+88EK8LL2xsVEOR0KrRgAgpdyyYJosSzrU0qdzPUOqKs41PRIAIE2d7BzQgD+kHLdD86blmx4HyCrO6PPYEOewACAj3XVDjf6f7Ue16/R5NZzr1ZKqItMjAUiSCaUVGzdu1OnTp+Xz+fTGG29ozZo18X+3Y8cO/fCHP7zsY3/4wx/qmWeemcgvCwApaWq+R3UzSiRJv+ckFgDgGsROYS2pKor3EwBIDorRASCzlRfmaP2ySKfxv7x+2vA0AJKJZ1YAMAlGTmIRggAAJi4WgnAKC0i+eDE657AAIGN9+sbISaxn3mlWvy9oeBoAyUIIAgCT4LZF5ZKkV491KECZJgBggvZRig4YEy9G5xwWAGSs+rmlmjctXwP+kH7+TrPpcQAkCSEIAEyC5dXFKs33qM8X1K7T502PAwBIQ+GwrQPNvZKk2umEIECyjRSj84IWAMhUlmXpz26cJUn60eunZdsE30A2IAQBgEngcFh670JOYgEAJq6xa1B9vqA8LocWVBSYHgfIOrFNEIrRASCz/dF1M5TrdupQS5/ePNllehwASUAIAgCTZKQXpM3wJACAdLTvglJ0N6XoQNLFOkECdIIAQEYrznXrk9dVS5J+8MpJw9MASAaeXQHAJLllwTRZlnSopU/neoZMjwMASDOxUvTa6UWGJwGy08gmCOewACDT/ae1syVJv2lo1enOAbPDAEg4QhAAmCRT8z2qm1EiSfo9J7EAAOMU2wRZTik6YESsE4RidADIfPPLC/W+hdNk29L/fvWU6XEAJBghCABMotsWlUuiFwQAMD62bY9sghCCAEa4HLFzWGyCAEA2+MzNcyRJ//p2k3qHA4anAZBIhCAAMIlivSCvHuugVBMAMGaNXYPqHQ7K43RoYUWh6XGArEQxOgBkl1sWlGlhRYEG/CH99K0m0+MASCBCEACYRLXVxcrzONXnC+pYW7/pcQAAaSJ2CmtxVaE8Lr5EB0ygGB0AsotlWfpPayPbIP/71VMKsgkIZCyeYQHAJHI6rHgvyDuN580OAwBIG/ubeyVxCgswKdYJwiYIAGSPT6yq1tR8j5q7h/Trg62mxwGQIIQgADDJrptVIknaTQgCABij/ZSiA8bFzmHRCQIA2SPH7dSn18yUJP3glZOGpwGQKIQgADDJVtVMkSS909htdhAAQFqwbTt+DosQBDAnVoweZBMEALLKPTfOkttpadfp81x0ADIUIQgATLKVM0skSUfb+tUzFDA7DAAg5Z05P6SeoQCl6IBhnMMCgOxUXpSjj6+sliR9b8dxw9MASARCEACYZGUFXs2cmidJevdMt9lhAAApL7YFsqiSUnTAJM5hAUD2uv/WebIs6TcHW9Vwrtf0OAAmGc+yACABVkW3QTiJBQC4mlgIQik6YJbbGXl6zCYIAGSfedMK9JHlVZKk//niMcPTAJhshCAAkACrakokUY4OALg6StGB1OCMb4IQggBANtr4/vmSpOf3ndOxtj7D0wCYTIQgAJAAq2aOlKPbNk+kAQCju7AUvba6yPA0QHZzRztBgmHOYQFANlpcWaTbl1bItqXvvUg3CJBJCEEAIAGWVBXJ63KoZyigkx0DpscBAKSoM+eH1D0Yk
NtpaVElpeiASU5H9BwWmyAAkLVi2yC/2HtWpzt5Lg9kCkIQAEgAj8sRP2tCLwgA4HJip7AWVhTK63IangbIbvFidDZBACBrrZhRovctnKZQ2NY/7mAbBMgUhCAAkCDxcvQmekEAAKPbRx8IkDIoRgcASNJffSCyDfL/7T6j5u4hw9MAmAyEIACQIBf2ggAAMJqRPhBCEMC0C4vR6XQDgOy1etZU1c8tVSBk67vbj5oeB8AkIAQBgASJbYIcaunToD9odhgAQMqxbTt+DotNEMC8WDG6xDYIAGS7L6xfKEl66u0mHTjbY3gaANeKEAQAEqSqOFeVRTkKhW29e4YvmgAAF2vuHtL5wYBcDkrRgVQQ2wSRpCAhCABktdWzpuqjK6pk29LXf3mQDUEgzRGCAEACxXtBOIkFAPgDF5ai57gpRQdMi3WCSIQgAADpSx9eLI/LoddPdOnXB1tNjwPgGhCCAEACjYQglKMDAC5GKTqQWlwXbIKEQoQgAJDtZkzJ01/cMleS9OjzDfIFQ4YnAjBRhCAAkEDxcvSmbtZnAQAX2dfcK0mqnUEIAqSCC89hBcJhg5MAAFLF/bfO07RCr053DurJ106ZHgfABBGCAEACLa8ulsthqb3PpzPnh0yPAwBIEZSiA6nHsqz4NkiQTRAAgKR8r0t/s36RJOm724+po99neCIAE0EIAgAJlON2aklVkSRRjg4AiGvuHlLXgF8uh6XFlKIDKSO2DRJkEwQAEPXH181QbXWR+nxBPfpcg+lxAEwAIQgAJNiK6JmTvWe6zQ4CAEgZlKIDqSlWjs4mCAAgxuGw9NWPLZPDkn72TrN+8maj6ZEAjBMhCAAkWF1NiSRpT1O30TkAAKkjVoq+gj4QIKW4nLFNEEIQAMCI1bOm6r/eHjmL9fCzB7SPSw9AWiEEAYAEWxkNQfad6VEwxGkFAMDIicRa+kCAlOLiHBYA4DLuf988rVtSIX8wrM/9yy6dH/CbHgnAGBGCAECCzZtWoHyPU0OBkI6195seBwBgGKXoQOpyOTiHBQAYncNh6dv/oU6zSvPU3D2kB5/aoxCbg0BaIAQBgARzOiwtj/WCcBILALLemfNDOj8YkNtpaXEVpehAKhkpRuebWgCASxXnurX1z1Yrx+3QS0fa9fcvHFKYvzOAlEcIAgBJMNILwt1QAMh2F5aie12UogOpxB3rBOGEKQDgMpZUFembn1guSfpfL53QZ558S12cxgJSGiEIACTByhklkqR3z3QbnQMAYB6l6EDqcjkjT5EDnMMCAFzBH6+eoW9+slYel0MvHm7Xh/+fl/T6iU7TYwG4DEIQAEiC2CbIoZY+DQdCZocBABgVC0EoRQdST6wYnRvvAICr+fSaWXrmL9dq7rR8tfb6dPe21/Xo8w1qONcr2+bvESCVuEwPAADZoKo4R9MKvWrv8+nA2R6tnjXV9EgAAANs2x7ZBKkuMTsMgEu4ouewAmHOYQEArm7p9CL98r/crId/cUBP7zqj7790Qt9/6YSqS3K1bkm51s4v07RCr0ryPCrJdaswxyWnw5JlWaZHB7IKIQgAJIFlWaqbUaLfNrRqTxMhCABkqzPnh9QdLUVfWFlgehwAf8DpiBxLCHEOCwAwRnkel771p3Vat6RCT+9q0stHO9TcPaQnd57WkztPX/XxLocl5wVveR6nCrwuFeZEQpOiHLeKcl0qynWrONetQq9LXrdTXpdDXpdTbqcl25ZCti3bthUKS8FwWP5gWIGQrWA4rGDIVti2FQrbCv3BloolS5YlOayR/2xZlv4wpok9ajxbLmHblm1HHhu2bYXDkflC4bAuXLqMZUJ/+GuGbFvBkK1g2FYwFI7PYCkyo8Oy5HJacjmib06HnI7o+x2WHA5LdnSGsG3LVmTbMxAKR3+0L/l8/vDzdFhW5Pc6+nue43bGf26nFfn/WdiOzBgKR370BUIa8oc0FIi81c0o0SdWVY/59w2TjxAEAJJkZU2xftvQqr1N3aZHAQAYEtsCWVRJKTqQitzRc1hBNkEAAOP0odpKfai2UkP+kF491qHfNrRqX3OPeoYC6hkMqM8XHPVxweg3zmP6hoNqlS9ZYyNJVs0s0azSfNNjZC1CEABIklgvyF7K0QEga717JhKCLOcUFpCS4uew2AQBAExQrsepdUsrtG5pxUXvD4TC6h8OKhzdMLCl+IZCMGwrFLIVCIc15A+pbziovuFA/MeeoaB6hgLqHQ6ofzgofygsXzAkXyCsQCgc3YqIbC04LEtulyWXwyG30yG38+JNE6dlxTcvYksQYVuyNbIx8YfVWLZtx7dDLGtkE+NKLnyMorPFNihc0W2NCz92tL95HZYVnd8RfczI75sU2RQJhWPbIpHNl3B02yW2lRH7fbGimy6x34/IBo5Dzgsas2Of04WfWShsyxeM/H4PByI/xrZZQrYUDtvxzyn2+5vjdijX41SO26mXj3boWFu/ntt3Tn956/wr/p4hcQhBACBJYrffT3cO6vyAX1PyPWYHAtLY448/rn/4h39QS0uL6urq9N3vflc33HDDqB+7bds2/fM//7P2798vSVq9erUeffTRy348kEj7m2MhCKXoQCpyxc5hUYwOAJhkbqeD7wNkof/zZqM2/2yffrmXEMQkx9U/BAAwGYrz3JpTFll9ZBsEmLinnnpKmzZt0iOPPKLdu3errq5O69evV1tb26gfv2PHDn3qU5/Siy++qJ07d6qmpka33367mpubkzw5st1FpegzCEGAVDSyCcI5LAAAcO3WL6uU02Hp4LlenWjvNz1O1iIEAYAkqot+0yt2DgXA+D322GO67777tGHDBi1dulRbt25VXl6ennjiiVE//kc/+pH+8i//UitXrtTixYv1T//0TwqHw9q+fXuSJ0e2a+oaUs9QQB6nQwsrCk2PA2AUrmgnCJsgAABgMkzN92jt/DJJ0nPvnjM8TfYiBAGAJIr3glCODkyI3+/Xrl27tG7duvj7HA6H1q1bp507d47p5xgcHFQgENDUqVMv+zE+n0+9vb0XvQHX6sJSdI+LL8OBVBQ7hxUgBAEAAJPko8urJEnP7SMEMYVnXwCQRBeWo9s2T66B8ero6FAoFFJFxcUlgxUVFWppaRnTz/HFL35R06dPvyhI+UNbtmxRcXFx/K2mpuaa5gYk6d3mbknSck5hASkrdg4ryDksAAAwSdYvq5TbaelQS5+OtfWZHicrEYIAQBItrSqSy2Gpo9+v5u4h0+MAWefv/u7v9JOf/EQ///nPlZOTc9mP27x5s3p6euJvTU1NSZwSmYpSdCD1cQ4LAABMtuI8t26OnsT6JSexjCAEAYAkynE7taSqSJK0u7Hb7DBAGiorK5PT6VRra+tF729tbVVlZeUVH/utb31Lf/d3f6df//rXWrFixRU/1uv1qqio6KI34FrYtq19ZwhBgFTnckbPYYUIQQAAwOT56IrpkiIhCJdBko8QBACS7IY5kR6Cncc7DU8CpB+Px6PVq1dfVGoeKzmvr6+/7OP+/u//Xl//+tf1wgsv6Prrr0/GqMBFGrsG1Tsc
lMdFKTqQykY2QTiHBQAAJs8Hl1XI43ToWFu/jrT2mx4n6xCCAECS3TSvVJK083iH4UmA9LRp0yZt27ZNTz75pBoaGnT//fdrYGBAGzZskCTde++92rx5c/zj//t//+966KGH9MQTT2j27NlqaWlRS0uL+vv5whPJsze6BbKkqohSdCCFxTpB2AQBAACTqSjHrfcunCZJ+uW7Zw1Pk314BgYASXbDnKlyOiyd6hykFwSYgLvuukvf+ta39PDDD2vlypXas2ePXnjhhXhZemNjo86dG7mz+o//+I/y+/36kz/5E1VVVcXfvvWtb5n6FJCF9p3pliSt4BQWkNJcjshT5CCbIAAAYJJ9dEWVJOk5TmIlncv0AACQbQpz3Foxo1jvNHZr5/FO/cnqGaZHAtLOxo0btXHjxlH/3Y4dOy7651OnTiV+IOAq3o31gcwgBAFSWewcVpBidAAAMMnWLa2Qx+XQiY4BHW3r50xuErEJAgAGxE5ivXaMk1gAkOnCYVv7myMhyApCECClxYrRg5zDAgAAk6zA69KSqiJJ0smOAcPTZBdCEAAw4KZ5ZZKk1453sgIJABnuREe/Bvwh5bqdmj+twPQ4AK4g1tnjC4YMTwIAADJReaFXktTW5zM8SXYhBAEAA1bPmiKPy6GW3mHSfwDIcLFTWMumF8VfZQ4gNeV5nJKkQT8hCAAAmHyxEKSdECSpeBYGAAbkuJ1aPXOKJOnV452GpwEAJBJ9IED6iIUgQ4QgAAAgAcoLcyRJ7X3DhifJLoQgAGBIrBdk53F6QQAgk+2jDwRIG7luNkEA/P/bu/Moqesz3+OfqupauuidphsaGlpQJKjQAgHRMEanR2Z0jM4ZbziaIMM1ZlFy54S5GSUa2oQZIV7H8Z6EyARDzLlDgjE3OolyMEoguSiRhE0joIEGmsVe6X2t5Xf/qK5CNqWb31L16/frnDpK+avi6a809XQ99TwPAFhnVHIcVjudIHaiCAIADrn+8mQRpFnxOHtBAMCNorG43j2ZLIIUOBsMgI8VDmRJohMEAABYIzUOq5MiiJ0oggCAQ6aNK9CIgE8t3REdqOtwOhwAgAX+3NCp3khcOcEsXTZyhNPhAPgYqZ0gkajDkQAAADcqyaMTxAkUQQDAIX6fV7MvK5IkvclILABwpXcG9oFcPTZPXq/H4WgAfBwWowMAACslx2E1dfYxFcRGFEEAwEHXTyqWJL3JcnQAcKW3T7RKYhQWkCkYhwUAAKxUnBOUxyNF44ZauvudDmfYoAgCAA6aO7Ac/a2aZkVicYejAQCY7e2BTpBrxrIUHcgE2XSCAAAAC/l9XhWFA5Kkhg5GYtmFIggAOGjqmDwVhP3q6o+l3igDALhDXzSm/R+0S5Km0wkCZITkOCw6QQAAgFWSI7EogtiHIggAOMjr9WjuxEQ3yJYDDQ5HAwAw0/t1nYrEDOVn+1VelO10OAAuQrII0h+LK0qXLgAAsECyCNJIEcQ2FEEAwGG3TRsjSVr/1lF190cdjgYAYJbT+0Dy5fGwFB3IBMlxWJLUHaEbBAAAmO90J0ivw5EMHxRBAMBhf3P1GE0YGVZLd0TP/+GY0+EAAEzy9jH2gQCZJuDzyudNFC0ZiQUAAKxQkhuSJDW00wliF4ogAOAwn9ejL/7FREnSs//vMAvSAcAl3j6RKIJMYx8IkDE8Ho/C/kQ3SFcfHboAAMB8JclxWJ0UQexCEQQA0sDfzxin4pygTrT26Fd7TzodDgDgEvVGYnq/vkNSYhwWgMyRHInVTScIAACwQGonCJ0gtqEIAgBpIOT36b9/qkKStOa3hxSPG84GBAC4JO+ebFcsbqg4J6Ax+SGnwwEwCMnl6D3sBAEAABagE8R+FEEAIE18/roJyg1m6f36Tv3mQIPT4QAALsHeY62SpOnjCliKDmSY7ECWJDpBAACANUrykjtBWIxuF4ogAJAm8kJ+3XPdeEnSM7895HA0AIBL8fbxVknsAwEyUaoTpJ+dIAAAwHzJcVhd/TF2kNmEIggApJH7brhMAZ9XO4+26A9HTjkdDgBgiPYeTyxFn17OPhAg04TZCQIAACyUE8xK5RuNHYzEskOW0wEAAE4ryQvp72eO00931Oprz+/R9z83w9RPEcfihho6enW8pUcnWnrU1R9VNGYoEosrGjfkkeTzepTl9cjn8yro8yoc9GlEIPECHQ5kKeT3KpjlU9Dvld/nVTQWV180cYvE4gpkeZUTzNKIYJbCfp+8XsbAABhe2rojOtzUJSkxDgtAZsn2UwQBAADWKskN6khztxo6+lRRPMLpcFyPIggApJn/8ZeXa9vBRh071aO7ntmuR//2E1p43YRBz5Tvj8a1/4N27a5t0e5jrXr7eJuOt3QrErNv6brHI90ytVRrPj+TmfgAho23T7RKksYXhVU4IuBsMAAG7fQ4LIogAADAGqNSRRD2gtiBIggApJkx+dl6+avz9PUX9urX++q1/L/e1VuHT+nxO69Rfth/wce1dUe0s/aU/nCkRTuPtGjv8Vb1RePnXOfzelRWENK4grByQ1ny+7yJ7g9fokgRixuKxg3FYob6ojF19yduXf1RdffF1BeNqS8aV28kpriRKHQEs7wK+LwKZHnVF42rqy+quCEZhvTqu/V69d16/fXVoy07MwBIJ6ml6OUFjsYBYGhYjA4AAKxWkptYjs44LHtQBAGANJSf7dd/LJypdW8c0cqN+/XK2x/olbc/0NiCbE0qydGkUSMU8vt0oqVHJ1oTo63q2s/99EBB2K9rywt07fhCVZYXaFJJjkpzg8rymbMSKhY35PXonC4PwzDUG4nrf2/+s9b89pCeePWAqj5RYtrvCwDpLLUPZBz7QIBMlNoJEmFRKQAAsEZyOXoDRRBbUAQBgDTl8Xh036cu07XjC/Q/X9irmsauRMGjtUe/e7/xvI+ZWDxCsyoKNWtCkWZWFGpi8QhLx1D5LrDvw+PxKDvg0wM3TdLzf6hVTWOXXth5XHfPHm9ZLACQDgzD0B46QYCMxjgsAABgtVQRpJ0iiB0oggBAmpsxvlC/+adPq6WrX4caO3WwIXGLxOIqK8jWuMKwxhZma0Iazp7PC/m15OYrtOLlfXr69fd1Z+VYZQ+8sQAAblTX3qvGjj75vB5dVZbndDgAhiCZqzAOCwAAWKVkoAjS2EkRxA4UQQAgQxSOCGjWiCLNqihyOpRB+fx147Vu22GdaO3Rj948rAc+fbnTIQGAZfYeS4zCuqIkR+EAqTaQicJ+OkEAAIC1SvISO0EazjPaHOZjODsAwFLBLJ/+6ZbJkqRnth5Sa3e/wxEBgHX2Hm+VJFUyCgvIWOHUYnR2ggAAAGuMyhnoBGEniC0oggAALHdH5VhNGZ2rjt6ovr/1kNPhAIBl3h4ogrAPBMhcjMMCAABWK8lLFEFOdfcrEos7HI37UQQBAFjO5/Xoob+ZIkl67s0jauuOOBwRAJgvHjf09sA4rGnj8h2OBsBQpRajRyiCAAAAaxSFA/J5PTIMqbmTiRlWowgCALDFpyePUll+SP3RuP7c0OF0OABgupqmLnX
0RRXyezW5NNfpcAAMEZ0gAADAal6vR8U5AUmMxLIDRRAAgC08Ho8qikdIko42dzscDQCYLzkK66qyfPl9pNlApkruBGExOgAAsFJJ7sBy9A6Wo1uNn84AALaZMDIsSTra3OVwJABgvr3HWiVJ08cVOBoHgEsTTnWCsBgdAABYZ1RuYi9IA50glqMIAgCwzYSRA50gp+gEAeA+e48n9oFML2cfCJDJsv2MwwIAANYrGSiCMA7LehRBAAC2mVCU6AQ5wjgsAC7TH41r38l2SXSCAJku2QnSF40rFjccjgYAALhVSaoThHFYVqMIAgCwTbITpJZxWABc5kBdu/pjceVn+1Oj/wBkpuROEEnqidANAgAArJEah9VOJ4jVKIIAAGwzfuCNwZbuiNp6Ig5HAwDm2TOwD2TauHx5PB5ngwFwSUJ+r5LfxuwFAQAAVhk1sBi9sZMiiNUoggAAbJMTzFJxTkCSVMtILAAusru2VZI0Y3yhs4EAuGQejye1F6SHvSAAAMAiJXl0gtiFIggAwFanl6MzEguAe+yqbZEkzZhAEQRwg+ReEJajAwAAq4zKOb0Y3TDYQ2YliiAAAFsll6MfpRMEgEs0d/al/k6rZCk64ArZFEEAAIDFkjtB+mNxtfcwgtNKFEEAALZKdYKwHB2ASyT3gUwaNUL5Yb+zwQAwRdifWI7OOCwAAGCVkN+n/OzEzw8NHb0OR+NuFEEAALaaMJJOEADukhqFxT4QwDVOd4LwqUwAAGCdZDdIQwd7QaxEEQQAYKvxFEEAuExyKfq1FEEA10juBOmJ0AkCAACsU5J7ei8IrEMRBABgq4qBcVh17b3q5Y0FABkuFje0d2Ac1owJBY7GAsA8LEbHx1m9erUqKioUCoU0Z84c7dix44LXrl27VvPmzVNhYaEKCwtVVVX1kdcDAIaP050gjMOyEkUQAICtCsN+5QYTc7ZrT9ENAiCzvV/foa7+mEYEfLqiJNfpcACYJDuQyFUoguB8nn/+eS1dulTV1dXatWuXpk+frvnz56uhoeG812/dulV33323tmzZou3bt6u8vFy33HKLTpw4YXPkAIB0U5yTKII0d/Y7HIm7UQQBANjK4/FoQjEjsQC4Q3IU1vTyAvm8HmeDAWCasH9gHBY7QXAeTz31lO6//34tXrxYU6dO1Zo1axQOh7Vu3brzXr9+/Xo98MADqqys1JQpU/Tss88qHo9r8+bNNkcOAEg3ySJIYyfjsKxEEQQAYLsJRYmRWEebuxyOBAAuDUvRAXdKLkbvohMEZ+nv79fOnTtVVVWVus/r9aqqqkrbt2+/qOfo7u5WJBJRUVGRVWECADJEcU5AktREJ4ilspwOAAAw/ExgOToAl9g9UAS5dnyBs4EAMFVqMTpFEJylqalJsVhMpaWlZ9xfWlqqAwcOXNRzPPTQQyorKzujkHK2vr4+9fWd/lRwe3v70AIGAKS1ZCdIE4vRLUUnCADAdqkiCDtBAGSw1u5+HWpMdLRdSycI4CqnF6MzDgvmWrVqlTZs2KAXX3xRoVDogtetXLlS+fn5qVt5ebmNUQIA7JLaCdJFEcRKFEEAALYbzzgsAC6w51irJKliZFhFIwLOBgPAVCxGx4UUFxfL5/Opvr7+jPvr6+s1evToj3zsk08+qVWrVunXv/61pk2b9pHXLlu2TG1tbanbsWPHLjl2AED6Kc5N/BzR3NmveNxwOBr3oggCALBdxcBi9BMtPYrG4g5HAwBDk1yKzj4QwH0Yh4ULCQQCmjlz5hlLzZNLzufOnXvBxz3xxBNasWKFNm3apFmzZn3s7xMMBpWXl3fGDQDgPiNHJDpBonFDbT0Rh6NxL4ogAADbleaGFMjyKho3dLK11+lwAGBIdrEPBHCt0+OwKILgXEuXLtXatWv14x//WPv379dXvvIVdXV1afHixZKke++9V8uWLUtd/53vfEff/OY3tW7dOlVUVKiurk51dXXq7Ox06ksAAKSJQJZX+dl+SVJTJyOxrMJidACA7bxejyYUhfXnhk4dae7S+IEdIQCQKeJxIzUOi30ggPtk+weKIBGKIDjXggUL1NjYqOXLl6uurk6VlZXatGlTall6bW2tvN7Tnzl95pln1N/fr7vuuuuM56murtZjjz1mZ+gAgDQ0Miegtp6Imjr7dUWp09G4E0UQAIAjJoxMFEFYjg4gEx1q7FRHb1Qhv1dTRuc6HQ4Ak4UHdoL0sBgdF7BkyRItWbLkvP9t69atZ/z6yJEj1gcEAMhYxTlB1TR20QliIcZhAQAckVqO3sRydACZJ7kPZNq4AmX5SKkBtwkHGYcFAADsMSonsReEIoh1+IkNAOCI5HJ0OkEAZKI/Hj0liaXogFuxGB0AANilOCcgiSKIlSiCAAAcMb5ooAjSTCcIgMzz1uFEEWTOZUUORwLACmF/YhwWnSAAAMBqI5OdIB39DkfiXkMqgqxevVoVFRUKhUKaM2eOduzYccFr165dq3nz5qmwsFCFhYWqqqr6yOsBAMNDxcjEOKzaU90yDMPhaADg4tW19epoc7e8HmlmBZ0ggBtlJztBIjHF4+QpAADAOsUDRZDmLjpBrDLoIsjzzz+vpUuXqrq6Wrt27dL06dM1f/58NTQ0nPf6rVu36u6779aWLVu0fft2lZeX65ZbbtGJEycuOXgAQOYaW5gtn9ej3khcDR280APIHDuOJLpAppblKS/kdzgaAFZIjsOSpN4o3SAAAMA6yXFYjZ10glhl0EWQp556Svfff78WL16sqVOnas2aNQqHw1q3bt15r1+/fr0eeOABVVZWasqUKXr22WcVj8e1efPmSw4eAJC5/D6vxhZkS5KOsBwdQAbZcbhZkjS7YqTDkQCwSrb/dBGEkVgAAMBKxbnJcVh8QNQqgyqC9Pf3a+fOnaqqqjr9BF6vqqqqtH379ot6ju7ubkUiERUVXXh+cl9fn9rb28+4AQDcJ7kX5FhLj8ORAMDF2zGwD2Q2+0AA1/J6PQr5Ez8usxwdAABYqXjEQBGks49x4RYZVBGkqalJsVhMpaWlZ9xfWlqqurq6i3qOhx56SGVlZWcUUs62cuVK5efnp27l5eWDCRMAkCHGFSY6QY63dDscCQBcnFNd/Xq/vlMSRRDA7cIBlqMDAADrFecmxmH1RePqIu+wxJAWow/VqlWrtGHDBr344osKhUIXvG7ZsmVqa2tL3Y4dO2ZjlAAAuyTHYZ2gEwRAhkh2gUwuzVHRiIDD0QCwUnIkVnd/1OFIAACAm4UDWal9ZIzEskbWYC4uLi6Wz+dTfX39GffX19dr9OjRH/nYJ598UqtWrdLrr7+uadOmfeS1wWBQwWBwMKEBADLQuKJkJwhFEACZgVFYwPCRfDOCcVgAAMBqxTlB1Z7qVlNnnyqKRzgdjusMqhMkEAho5syZZyw1Ty45nzt37gUf98QTT2jFihXatGmTZs2aNfRoAQCuMq4wsRPkeCvjsABkhh1HBpaiX8ZSdMDtkkUQxmEBAACrFeckusybOukEscKgOkEkaenSpVq0aJFmzZql2bNn6+mnn1ZXV5cWL14sSb
r33ns1duxYrVy5UpL0ne98R8uXL9dPfvITVVRUpHaH5OTkKCcnx8QvBQCQaZLjsD5o7VUsbsjn9TgcEQBcWHtvRPtOtkuSZlfQCQK4XXayCBKhCAIAAKw1MicxFamxs9/hSNxp0EWQBQsWqLGxUcuXL1ddXZ0qKyu1adOm1LL02tpaeb2nG0yeeeYZ9ff366677jrjeaqrq/XYY49dWvQAgIxWmhdSltejaNxQfXuvygaKIgCQjnYebVHckCaMDGt0/oX32wFwh+Ri9B52ggAAAIsVDxRBmukEscSgiyCStGTJEi1ZsuS8/23r1q1n/PrIkSND+S0AAMOAz+tRWUG2ak9163hLD0UQAGntrZrEPpA57AMBhoVsxmEBAACbjGIclqUGtRMEAACzJUdinWAvCIA0t+Mw+0CA4STspwgCAADsUZyb6ARp6mAclhUoggAAHDWuMFEEOX6qx+FIAODCevpjevt4myQ6QYDhIrkYvYciCAAAsNjIEQNFEDpBLEERBADgqLHJIkgLRRAA6Wt3bYuicUNj8kOp4i0Ad8se2AlCJwgAALBa8cA4rOYuOkGsQBEEAOCocYVhSdKJVoogANLXW4dP7wPxeDwORwPADqlOkAiL0QEAgLVOj8OiE8QKFEEAAI5KjcNqYScIgPT15qEmSewDAYaTMIvRAQCATYpzEkWQjr6oeiPkHmajCAIAcFRyMfrJ1l7F44bD0QDAuVq7+7XzaIsk6cYrRzkcDQC7ZFMEAQAANskLZSngS7xVz14Q81EEAQA4akx+SD6vR/2xuBp5oQeQhn77fqPihnRlaW6qcAvA/ViMDgAA7OLxeDRyYC9IUyd7QcxGEQQA4Kgsn1ej80KSGIkFID1tOdAgSbppSonDkQCwU7Y/uRidnSAAAMB6yZFYzXxA1HQUQQAAjhub2gvCcnQA6SUWN/Tb9xslSTdTBAGGFXaCAAAAOxWnOkEogpiNIggAwHHjKIIASFN7jrWopTui/Gy/ZowvcDocADaiCAIAAOyU7ARhHJb5KIIAABw3rjAsiSIIgPSzeX9iFNZfTB6lLB+pMzCcsBgdAADYaeRAEaSxg04Qs/GTHADAceMGFg2faKUIAiC9/GZgH8jNU0Y5HAkAu4UDiZ0gPewEAQAANkiOw2ruohPEbBRBAACOOz0Oi8XoANLHydYeHajrkMcj3TiZfSDAcJMahxWJyTAMh6MBAABuNyp3YBwWnSCmowgCAHBccjH6iZYe3mQAkDa2vJfoArm2vEBFIwIORwPAbslxWIYh9UXjDkcDAADc7vROEIogZqMIAgBw3Jj8bHk8iTcYWAAGIF1sSY3CogsEGI7Cfl/q39kLAgAArDZyYBwWRRDzUQQBADgukOXV6LyQJEZiAUgPvZGY3jjYLEm6iSIIMCxl+bwK+BI/MnezFwQAAFgs2QnS0h1RJEYXqpkoggAA0sLYguReEJajA3De72ua1ROJaXReSFPH5DkdDgCHJEdi9dAJAgAALFYYDsjrSfx7C8vRTUURBACQFpLL0U+0UgQB4LzkKKybpoySx+NxOBoATkktR6cIAgAALObzelQ0ItEN0shILFNRBAEApIVxhWFJjMMC4LxY3NBr++olSTddySgsYDjLpggCAABsVJzaC0IniJkoggAA0sLYQsZhAUgPW99r0Mm2XhWE/fqLyaOcDgeAg5KdID0RdoIAAADrJfeCNHXQCWImiiAAgLSQGodFEQSAw/7P749Kkj47q1whv8/haAA4KezPkkQnCAAAsEeyE6S5iyKImSiCAADSwulxWD0yDMPhaAAMV0ebu/Tb9xslSZ+bM97haAA4jXFYAADATqlOEMZhmYoiCAAgLYzJD0mSeiIxtXRHHI4GwHC1/q1aGYZ04+RRmjByhNPhAHBYahwWRRAAAGCD0QPvjdQ0djkcibtQBAEApIWQ36eS3MQnHliODsAJvZGYfvbHY5Kke+dOcDgaAOmAThAAAGCn6yaOlCRtP9Skvij5h1koggAA0gbL0QE46Vd7T6q1O6KxBdn69JUlTocDIA2c7gRhMToAALDe1DF5GpUbVFd/TH880uJ0OK5BEQQAkDaSe0FqT9EJAsB+/zmwEP1z142Xz+txOBoA6SAcYDE6AACwj9fr0acnj5IkbTnQ4HA07kERBACQNqaNzZckvbav3uFIAAw3e4+1au/xNgV8Xi2YVe50OADSRLZ/YBxWhCIIAACwx01TEl3pW96jCGIWiiAAgLRxR2WZfF6Pdh5tUU1jp9PhABhGkl0gt00bo5E5QYejAZAuWIwOAADs9qkriuXzenSosUu1zUzKMANFEABA2ijJC+nGgbbPn+887nA0AIaLgw0d+q+9JyVJn7+OhegATgunFqOzEwQAANgjL+TXrAmFkqSt79MNYgaKIACAtHLXzHGSpF/sOqFY3HA4GgBu19UX1Zf/c5f6o3HNu6JYM8YXOB0SgDSSPbATpKuPThAAAGCf1Egs9oKYgiIIACCt/OUnSlQQ9quuvVfbDjY5HQ4AFzMMQ4+8+I4ONnSqNC+of19QKY+HhegATrusOCxJeutws442dzkcDQAAGC5uujJRBHnzULN62U12ySiCAADSSjDLpzuml0liJBYAa/1kR61e2nNSPq9H37tnhorZBQLgLDPGF2reFcWKxAyt3HjA6XAAAMAwMbk0R2X5IfVF4/p9TbPT4WQ8iiAAgLRz18xySdKr79aprTvicDQA3Oid42361i/3SZIe+usr9cmKIocjApCOPB6PHr1tqrweadO7dbwJAQAAbOHxePTpgZFYW99rdDiazEcRBACQdq4em6cpo3PVH43rV2+fdDocAC7zpxNt+sr6neqPxfVXU0t1/7yJTocEII1dOTpXd88eL0la8fI+dpYBAABbJEdi/eZAgwyD/ONSUAQBAKQdj8eTWpD+AiOxcB6rV69WRUWFQqGQ5syZox07dnzk9S+88IKmTJmiUCika665Rhs3brQpUqSThvZeff2Fvbr9e9t0vKVH5UXZevK/TWcPCICPtfSvJis3mKV3T7br/+4iNwEAANa7ftJIBXxe1Z7q1uEmdpNdCoogAIC0dEflWPm8Hu091qo/13c4HQ7SyPPPP6+lS5equrpau3bt0vTp0zV//nw1NDSc9/o333xTd999t+677z7t3r1bd955p+6880796U9/sjlyOCEeN3SwoUPf3fxnffrJrXph53EZhnRHZZle+NL1ys/2Ox0igAwwMieor/7l5ZKk//Xqe+rqizocEQAAcLsRwSzNmZgY27uFkViXxGNkQC9Ne3u78vPz1dbWpry8PKfDAQDY5As//qNe31+vicUjdMPlxZo2Ll+V5QUqyQvJ5/XI5/HI65W8Ho/ihiHDUOKmc1/aPDrzk94f/uC3na+E2QGfac81XF8f58yZo09+8pP63ve+J0mKx+MqLy/XV7/6VT388MPnXL9gwQJ1dXXp5ZdfTt133XXXqbKyUmvWrLmo39Pss65r61VTZ98lP0+mSX6vJb9HP+p7z+NJfN96PInv8eT3utfjkc+b+I6OG4YSU
2kM9UXjauuJqK07oraeiOrae7W7tlW7a1vU3nv6zcrK8gItv32qZowvtOzrBOBOfdGYbvn33+loc7dun16mW6aWalxhtsYWZis36FdnX1SdfVF19UXVG4mlHpfIOT7099nAP51y9dh8055ruOYiTuCsAWB4+uG2w1rx8j4Fs7z6ZEWR5lxWpOsmjdTE4hHK8nmV5U38fOTznn5fJPnPs52dfpz9PoldQn6vad34F/v6mGXK7wYAgAUWXT9Bmw/Uq6apSzUuaP0sCPu1Z/ktToeR0fr7+7Vz504tW7YsdZ/X61VVVZW2b99+3sds375dS5cuPeO++fPn66WXXrrg79PX16e+vtNFivb29ksL/Cw/euOw/uN3NaY+Jy4s5Pdq2rgCfW7OeH1mehnjrwAMSTDLp2V/8wl9+T936ld7T+pXezNvb5nP69Ghx291OgwAAHCR/nbaGK3bdlgnWnu07WCTth1skl5zOqpLs+/b8xUO2FuWoAgCAEhb864YpTceulm7alu091ir9h5r0zsn2tTzoU9XYnhpampSLBZTaWnpGfeXlpbqwIED531MXV3dea+vq6u74O+zcuVKfetb37r0gC8gN5SlMfkhy57fSRfq7jj3U0f62GJE8hNMMcOQMdD1EYsbqfu9nsRzeD1Sls+rgmy/8gduRSMCunpsvmaML9SUMbny+5gCC+DSzb+qVP++YLp++16jTrT26ERLj+raexU3En/PjQhkKSeYlfqEY3LwQnygUzUelwzDUMwwPrJL1SpOdqAAAIDBK80LadtDN+nPDZ16q6ZZv685pbcON6ups9/p0DIKRRAAQForK8hWWUG2/nZamaTEfP/owJug0bihWDzx5qjX60mNmDj7TYWzx2Ol/yBIOG3ZsmVndI+0t7ervLzctOdfcvMVWnLzFaY9HwDAHh6PR3937Tj93bXjUvdFYnH1R+PK9vvk9VJkAAAA5vJ4PJpcmqvJpblaOLdC0un3RmJxQ5F4XPH4R78vcrbzjRG3S7bfvDHhF4siCAAgo3i9HgV4g2HYKi4uls/nU319/Rn319fXa/To0ed9zOjRowd1vSQFg0EFg8FLDxgA4Hp+n5duMwAAYKsPvzeSLfuLCpmGTA0AAGSMQCCgmTNnavPmzan74vG4Nm/erLlz5573MXPnzj3jekl67bXXLng9AAAAAABwDzpBAABARlm6dKkWLVqkWbNmafbs2Xr66afV1dWlxYsXS5LuvfdejR07VitXrpQk/eM//qNuvPFG/du//Ztuu+02bdiwQX/84x/1gx/8wMkvAwAAAAAA2IAiCAAAyCgLFixQY2Ojli9frrq6OlVWVmrTpk2p5ee1tbXyek83u15//fX6yU9+okcffVTf+MY3dMUVV+ill17S1Vdf7dSXAAAAAAAAbOIxjPRfD9ve3q78/Hy1tbUpLy/P6XAAAEgLvD7ah7MGAOBcvD7ah7MGAOBcF/v6yE4QAAAAAAAAAADgShRBAAAAAAAAAACAK1EEAQAAAAAAAAAArkQRBAAAAAAAAAAAuBJFEAAAAAAAAAAA4EoUQQAAAAAAAAAAgCtRBAEAAAAAAAAAAK5EEQQAAAAAAAAAALgSRRAAAAAAAAAAAOBKFEEAAAAAAAAAAIArUQQBAAAAAAAAAACuRBEEAAAAAAAAAAC4EkUQAAAAAAAAAADgShRBAAAAAAAAAACAK1EEAQAAAAAAAAAArkQRBAAAAAAAAAAAuBJFEAAAAAAAAAAA4EoUQQAAAAAAAAAAgCtlOR3AxTAMQ5LU3t7ucCQAAKSP5Oti8nUS1iEXAQDgXOQi9iEXAQDgXBebi2REEaSjo0OSVF5e7nAkAACkn46ODuXn5zsdhquRiwAAcGHkItYjFwEA4MI+LhfxGBnwkY14PK6TJ08qNzdXHo9nyM/T3t6u8vJyHTt2THl5eSZGODxxnubiPM3FeZqHszSXmedpGIY6OjpUVlYmr5cJl1YiF0lPnKe5OE9zcZ7m4SzNRS6SmchF0hPnaS7O01ycp3k4S3M5kYtkRCeI1+vVuHHjTHu+vLw8/sCaiPM0F+dpLs7TPJylucw6Tz51aQ9ykfTGeZqL8zQX52keztJc5CKZhVwkvXGe5uI8zcV5moezNJeduQgf1QAAAAAAAAAAAK5EEQQAAAAAAAAAALjSsCqCBINBVVdXKxgMOh2KK3Ce5uI8zcV5moezNBfnObzx/99cnKe5OE9zcZ7m4SzNxXkOb/z/NxfnaS7O01ycp3k4S3M5cZ4ZsRgdAAAAAAAAAABgsIZVJwgAAAAAAAAAABg+KIIAAAAAAAAAAABXoggCAAAAAAAAAABciSIIAAAAAAAAAABwJdcVQVavXq2KigqFQiHNmTNHO3bs+MjrX3jhBU2ZMkWhUEjXXHONNm7caFOkmWEw57l27VrNmzdPhYWFKiwsVFVV1cee/3Az2D+fSRs2bJDH49Gdd95pbYAZZLBn2draqgcffFBjxoxRMBjU5MmT+X7/kMGe59NPP60rr7xS2dnZKi8v19e+9jX19vbaFG16+93vfqfbb79dZWVl8ng8eumllz72MVu3btWMGTMUDAZ1+eWX67nnnrM8TliHXMRc5CLmIhcxD7mIuchFzEMuAnIRc5GLmItcxFzkI+YhFzFPWuYihots2LDBCAQCxrp164x3333XuP/++42CggKjvr7+vNe/8cYbhs/nM5544glj3759xqOPPmr4/X7jnXfesTny9DTY87znnnuM1atXG7t37zb2799v/MM//IORn59vHD9+3ObI09NgzzPp8OHDxtixY4158+YZd9xxhz3BprnBnmVfX58xa9Ys49ZbbzW2bdtmHD582Ni6dauxZ88emyNPT4M9z/Xr1xvBYNBYv369cfjwYePVV181xowZY3zta1+zOfL0tHHjRuORRx4xfvGLXxiSjBdffPEjr6+pqTHC4bCxdOlSY9++fcZ3v/tdw+fzGZs2bbInYJiKXMRc5CLmIhcxD7mIuchFzEUuMryRi5iLXMRc5CLmIh8xD7mIudIxF3FVEWT27NnGgw8+mPp1LBYzysrKjJUrV573+s9+9rPGbbfddsZ9c+bMMb70pS9ZGmemGOx5ni0ajRq5ubnGj3/8Y6tCzChDOc9oNGpcf/31xrPPPmssWrSIF/sBgz3LZ555xpg4caLR399vV4gZZbDn+eCDDxo333zzGfctXbrUuOGGGyyNMxNdzIv9P//zPxtXXXXVGfctWLDAmD9/voWRwSrkIuYiFzEXuYh5yEXMRS5iHXKR4YdcxFzkIuYiFzEX+Yh5yEWsky65iGvGYfX392vnzp2qqqpK3ef1elVVVaXt27ef9zHbt28/43pJmj9//gWvH06Gcp5n6+7uViQSUVFRkVVhZoyhnue3v/1tlZSU6L777rMjzIwwlLP85S9/qblz5+rBBx9UaWmprr76aj3++OOKxWJ2hZ22hnKe119/vXbu3JlqDa2pqdHGjRt166232hKz2/Ba5B7kIuYiFzEXuYh5yEXMRS7iPF6L3INcxFzkIuYiFzEX+Yh5yEWc
Z8drUZZpz+SwpqYmxWIxlZaWnnF/aWmpDhw4cN7H1NXVnff6uro6y+LMFEM5z7M99NBDKisrO+cP8XA0lPPctm2bfvjDH2rPnj02RJg5hnKWNTU1+s1vfqPPfe5z2rhxow4ePKgHHnhAkUhE1dXVdoSdtoZynvfcc4+ampr0qU99SoZhKBqN6stf/rK+8Y1v2BGy61zotai9vV09PT3Kzs52KDIMFrmIuchFzEUuYh5yEXORiziPXMQ9yEXMRS5iLnIRc5GPmIdcxHl25CKu6QRBelm1apU2bNigF198UaFQyOlwMk5HR4cWLlyotWvXqri42OlwMl48HldJSYl+8IMfaObMmVqwYIEeeeQRrVmzxunQMtLWrVv1+OOP6/vf/7527dqlX/ziF3rllVe0YsUKp0MDgBRykUtDLmIuchFzkYsAyATkIpeGXMR85CPmIRfJPK7pBCkuLpbP51N9ff0Z99fX12v06NHnfczo0aMHdf1wMpTzTHryySe1atUqvf7665o2bZqVYWaMwZ7noUOHdOTIEd1+++2p++LxuCQpKytL7733niZNmmRt0GlqKH82x4wZI7/fL5/Pl7rvE5/4hOrq6tTf369AIGBpzOlsKOf5zW9+UwsXLtQXvvAFSdI111yjrq4uffGLX9Qjjzwir5f6+mBc6LUoLy+PT15mGHIRc5GLmItcxDzkIuYiF3EeuYh7kIuYi1zEXOQi5iIfMQ+5iPPsyEVc838kEAho5syZ2rx5c+q+eDyuzZs3a+7cued9zNy5c8+4XpJee+21C14/nAzlPCXpiSee0IoVK7Rp0ybNmjXLjlAzwmDPc8qUKXrnnXe0Z8+e1O0zn/mMbrrpJu3Zs0fl5eV2hp9WhvJn84YbbtDBgwdTCZMkvf/++xozZsywfZFPGsp5dnd3n/OCnkyiEjuvMBi8FrkHuYi5yEXMRS5iHnIRc5GLOI/XIvcgFzEXuYi5yEXMRT5iHnIR59nyWmTaivU0sGHDBiMYDBrPPfecsW/fPuOLX/yiUVBQYNTV1RmGYRgLFy40Hn744dT1b7zxhpGVlWU8+eSTxv79+43q6mrD7/cb77zzjlNfQloZ7HmuWrXKCAQCxs9//nPjgw8+SN06Ojqc+hLSymDP82yLFi0y7rjjDpuiTW+DPcva2lojNzfXWLJkifHee+8ZL7/8slFSUmL8y7/8i1NfQloZ7HlWV1cbubm5xk9/+lOjpqbG+PWvf21MmjTJ+OxnP+vUl5BWOjo6jN27dxu7d+82JBlPPfWUsXv3buPo0aOGYRjGww8/bCxcuDB1fU1NjREOh42vf/3rxv79+43Vq1cbPp/P2LRpk1NfAi4BuYi5yEXMRS5iHnIRc5GLmItcZHgjFzEXuYi5yEXMRT5iHnIRc6VjLuKqIohhGMZ3v/tdY/z48UYgEDBmz55t/P73v0/9txtvvNFYtGjRGdf/7Gc/MyZPnmwEAgHjqquuMl555RWbI05vgznPCRMmGJLOuVVXV9sfeJoa7J/PD+PF/kyDPcs333zTmDNnjhEMBo2JEyca//qv/2pEo1Gbo05fgznPSCRiPPbYY8akSZOMUChklJeXGw888IDR0tJif+BpaMuWLef9uzB5hosWLTJuvPHGcx5TWVlpBAIBY+LEicaPfvQj2+OGechFzEUuYi5yEfOQi5iLXMQ85CIgFzEXuYi5yEXMRT5iHnIR86RjLuIxDHp0AAAAAAAAAACA+7hmJwgAAAAAAAAAAMCHUQQBAAAAAAAAAACuRBEEAAAAAAAAAAC4EkUQAAAAAAAAAADgShRBAAAAAAAAAACAK1EEAQAAAAAAAAAArkQRBAAAAAAAAAAAuBJFEAAAAAAAAAAA4EoUQQAAAAAAAAAAgCtRBAEAAAAAAAAAAK5EEQQAAAAAAAAAALgSRRAAAAAAAAAAAOBK/x/J2Mbw++E2/AAAAABJRU5ErkJggg==",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABkEAAAJtCAYAAACBs9diAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAACs+UlEQVR4nOz9eXychX3u/V/37Nr3zbJsecGLvAYDxiwhJAaHpCTpcspJekLqk9BfKf49NG5OEycsT5IWctqG5nlaGp+4cSFteqCHJpQGSkIdHAIYCDYGW/KCd1v7Ym0jadb7+WMW2bFsS5Zm7pl7Pu/XS68GWWN/RRNLmmu+38swTdMUAAAAAAAAAACAzTisHgAAAAAAAAAAACAVCEEAAAAAAAAAAIAtEYIAAAAAAAAAAABbIgQBAAAAAAAAAAC2RAgCAAAAAAAAAABsiRAEAAAAAAAAAADYEiEIAAAAAAAAAACwJUIQAAAAAAAAAABgSy6rB5iMaDSqtrY2FRUVyTAMq8cBACAjmKapoaEhzZo1Sw4Hr2tIJb4XAQDgQnwvkj58LwIAwIUm+71IVoQgbW1tamhosHoMAAAy0unTpzV79myrx7A1vhcBAODi+F4k9fheBACAi7vc9yJZEYIUFRVJin0yxcXFFk8DAEBmGBwcVENDQ/LrJFKH70UAALgQ34ukD9+LAABwocl+L5IVIUhi1bO4uJgv9gAA/BpOIqQe34sAAHBxfC+SenwvAgDAxV3uexGOdgIAAAAAAAAAAFsiBAEAAAAAAAAAALZECAIAAAAAAAAAAGyJEAQAAAAAAAAAANgSIQgAAAAAAAAAALAlQhAAAAAAAAAAAGBLhCAAAAAAAAAAAMCWCEEAAAAAAAAAAIAtEYIAAAAAAAAAAABbIgQBAAAAAAAAAAC2RAgCAAAAAAAAAABsiRAEAAAAAAAAAADYEiEIAAAAAAAAAACwJUIQAAAAAAAAAABgS4QgAAAAAAAAAADAlghBAAAAAAAAAACALRGCAAAAAAAAAAAAWyIEAQAAAAAAAAAAtkQIAgAAAAAAAAAAbGnKIcgrr7yiO++8U7NmzZJhGHr22Wcv+5idO3fq6quvltfr1cKFC/XEE09cwagAAAAAAADW4nkRAACyy5RDEL/fr1WrVunxxx+f1McfP35cH//4x3Xrrbdq7969+uM//mN94Qtf0E9/+tMpDwsAAAAAAGAlnhcBACC7uKb6gDvuuEN33HHHpD9+69atmjdvnr797W9LkpYuXapXX31Vf/3Xf60NGzZM9Y9HhjBNU6YpmfH/7DAMORyG1WMBAAAAsAnTNCVJhsHPGcgsPC+ChMRzI1GeFwGAjDblEGSqdu3apfXr15/3vg0bNuiP//iPL/qYQCCgQCCQ/OfBwcEZnemhf9uvf3n79Iz+ntnKUOwLdOLnisSX61i4IZk6P+yImlIkak74e7mdhrwup7wuh/K9TtWX5qmhLF9zyvM1pyJfNy2sVEWhN+WfEwAAAIDsYZqmXjvSq3947bgOdgwpEI5oNBjRaCiiikKvvvnJZfro8jqrxwSuWCY+L/K9V47qb3YcmdHfMyud81xIInBNPD9imvGQQ5JMKWKaikRNRZP/98LfzuNyKN/jVL7bqUKfSwuqCrW4tkhLaou0pLZYcyvyCXYBwAIpD0E6OjpUU1Nz3vtqamo0ODio0dFR5eXlXfCYRx99VF//+tdTNlMoYmosFE3Z75+rQhFToUhYwwGp1y+d7hvVG+pL/rrTYeimhZX65OpZun1ZrQq9Kf+vHwAAAIAMFYpE9ZP32vS9V47rQPvET/B2DwX0h/+0R3evm6uvfmypfG5nmqcEpi8TnxcJhqMaCoRT9vvnqmA4qmA4qn6FpAHpcOew/mN/R/LXl80q1n23LtSGZbVysjUCAGmTkc9Cb9myRZs3b07+8+DgoBoaGmbs9/8fGxbrjz60YMZ+P6tN9UUEpnmZf5Z5wYaIw2HIYUhOw5BhxP5z7NdjHxk1TQUjUQVCUQXCUQ2OhXTm7IhO943qdN+IWtoH1dw2qF8c7tYvDnfL69qnL9w8T5tvW8wXfgAAACAHtPWP6r0z/drXOqD9rYN670y/zo6EJEl5bqfuurZBv7GyTkU+t/LcTnndDv3Daye09RdH9YNdJ/X2ibP62898QPOrCi3+TIDUS/XzIv/t+rn6+MpZM/b7WW2qzyokngZJnN0b/+dzP8KQYYxviTgMyWEYcjpib0b8nx3nPC8yGoptso0EI+ofDen9ziEd7BjSwY5BHe4YVnPboP7oh3s0v6pAf/Shhfrk6llyO6dc1wsAmKKUhyC1tbXq7Ow8732dnZ0qLi6e8NUOkuT1euX1pu5sUnmBR+UFnpT9/oi5trH8vH8+1j2s595t07/tbdPxHr8ef/moDnUM6Tv/9QNshQAAAMASpmlqYDSkU30jausfVVm+R4tqilRm8c8L0aiprqGATvWNqHsooOI8lyoKvKoojP0sky1PmpmmqV8c7tb3Xz2uX77fc8GvVxV59fs3NOr31s5Raf6F/86/cscSXT+/XJv/5V21tA/qo//PL1Vb7FOh16VCn0tl+W59bl2jblhYmY5PB7gimfi8SGm+Z8L/zWFm3bKoKvmfz/qD+ofXT+iJ147rWLdfX/o/7+qf3jipH35hrQp4TgQAUirlf8uuW7dOL7zwwnnve+mll7Ru3bpU/9HIMPOrCvXH6xfp/o9cpX/b26Y//df39J8HuvQ7331d2+6+Rg3l+VaPCAAAgCwVicZegZu4326aUvfQmN451a+9p/v1zql+nej1y+NyKM/tVJ7bKafDUMfA2IQnYaqKvFpUU6hCr0v+QETDgbD8gbBGghEFI9HkyRNTpop9bhXnuVWS51aB1yXTNBUMRxWKRBWKmHI5DeW5nfK5nfK5HYpGFduiDkcUCEUViprSOXP7A2Gd6R9VMHzxE76GIbnir0Z2GobyPE4VeF0q8LhU6HWpOM+tqiJPMjipLvJpdlmeGsrzVZbvlmEYGgtFdKLXr6NdfrX1j6q2xKeF1YWaV1kw7bNT/kBYz+5t1T+8dkJHuoYlSQ5DWlpXrOWzSrR8dolW1Jeoqa5YHtelA50PLa7Wf9x/s+5/6h29caxPp/pGzvv1l1o69ZU7luiem+dzax8ZiedFIEllBR5tvm2R7rl5nn745in93ctHtPd0vzb98x5tu/saubIk3AaAbDTlEGR4eFhHjoyXZx0/flx79+5VeXm55syZoy1btqi1tVU/+MEPJEl/+Id/qL/927/Vn/7pn+q///f/rp///Of6l3/5Fz3//PMz91kgqxiGoU99oF5zK/L1B/+4Wwc7hvSpx1/T9+5eozVzyy//GwAAACCnRaKmnv7Vae05dVZnzo7ozNlRtQ+MKTJRS+2vGQlGYrfaf01VkVezSvPUOxzQmbOj6h4KqHsoMMHvcKGxUEBdk/zYqXA6DNWX5qm6yKuhsbB6/QH1+YOKZybxTr7Y5+wPRtQzHJzU71vgcaokz632wbELTuNKsbCioTxfcysK1BAPThr
K8lVd7FWxz60iXyxkKfA4zwsdTNPU7pNn9S9vn9ZP3mvXSDAiSSr0uvS71zRo442NV/zCp5pin/73PdfraLdfA6MhDQfCGh4La8eBTv3onVY98sJBNbcN6lu/tVJ5HnpDkFo8L4LpKPK59Ye3LNB188r1mW1v6OVD3Xrg2f169LdWEOQCQIoYpjnRt70Xt3PnTt16660XvP9zn/ucnnjiCf3+7/++Tpw4oZ07d573mC9+8YtqaWnR7Nmz9eCDD+r3f//3J/1nDg4OqqSkRAMDAyouLp7KuMhwbf2j+sKTb6ulfVBFPpde/tKHVFmYupVfALATvj6mD/+ugczyyAsH9L1Xjl3247wuh1bOLtHqhlJ9YE6ZFtcWyTRNjQajGg1FFIpEVV3k1eyy/POeOB8OhHWka1iHO4cUCEdV6HUmNyzyPE55XA55XQ55nLHHDI6FYm+jIQ2NheV2OuJvhtxOh4KRqMZCsa2PsXBEhmHIG/89vC6nXPHb8rHb84a8bocayvJVV+K74JXB0aipwbGQgpGoIlFTkaipcCS2BeMPhOMbKxH1jwbVOxxU73BAPf6gOgbGdObsiDoHzw9rin0uLaguVH1pntoHxnSka1gDoxeGRBNxOQyV5ntUlu9WWYFHPUMBHevxJ3+9sSJfn13XqN+9ZraKfO5J/Z5TZZqm/vGNk/rGv7coHDXVVFdMb0ga5erXR54XwUx5qaVT/79/fFtRU9p82yL9Xx+5yuqRACCrTPbr45RDECvwxd7eRoJh/Zetu9TcNqj/em2DvvXbK60eCQCyAl8f04d/10Dm+Oc3T+mrP94nSfqDD85XU12xZpflaXZZvkrzY0+0J8IEl8OQw8Gras81FoqotX9U/SMhza3IV0WB54Jtjp7hoI50Det034hOnx3Rqb4Rne4bUZ8/qMGxsIbGQskNlF+X53bq4yvr9LvXNOjaxrK0var5jWO9+qMf7lGfP7YNc928cv3Omtn62Io6+gdTiK+P6cO/a/v6xzdO6sFn90uS/uJ3Vup3r2mweCIAyB6EIMgqb5/o0+9s3SXDkP7tvhu1cnap1SMBQMbj62P68O8ayAyvHO7Wxid+pUjU5BWzFjJNU2OhqPpHgzrrD6l/JKi+kaAMGbplcZVloUNr/6i+9uN9+sXh7uSZrzy3U1+4eZ7+5PbFlsxkd3x9TB/+Xdvb/3zxoL6786g8Tod+8acfUl1JntUjAUBWmOzXR1qXkBGuaSzXp1bPkmlK//dzzcqCbA4AAABpdLhzSPf9cI8iUVO/+YF6/f8/vNDqkXKWES9iryvJU9OsYt2wsFK/sXKWPr7S2q2L+tI8PbHxOr325Q/rf2xYrPmVBRoNRfQ3Pz+isVDEsrkA4HL+x+2Ldc3cMgUjUf3TGyetHgcAbIcQBBnjK3csVb7HqT2n+vXs3larxwEAAECG6PMHtfEffqWhQFjXNZbrW79NeSwublZpnu67daF2/MktcsXPoSXOZAFAJnI4DH3+pnmSYmcfCW4BYGYRgiBj1Jb4tCn+ir5HXzio4UDY4okAAACQCX6w64Ra+0c1tyJf/+uza+R1OS//IOQ8wzBUUeiRJPUMBy7z0QBgrduaalRfmqezIyE9t7fN6nEAwFYIQZBRPn/TPM2tyFfXUEB/+/MjVo8DAACADPD8e+2SpP/rw1eprMBj8TTIJpWFXklS7zCbIAAym8vp0N3r5kqStr92nDPhADCDCEGQUbwupx78eJMk6fuvHlPn4JjFEwEAAMBKhzuH9H7XsDxOh9Y31Vg9DrJMIgTpZhMEQBa469oG+dwOHewY0hvH+qweBwBsgxAEGecjS6u1Zm6ZQhFTP36HbhAAAIBc9pP4FsjNV1WqJM9t8TTINolzWGyCAMgGpfke/dbVsyVJT7x+3OJpAMA+CEGQcQzD0O9eE/ui/8zuM6yAAgAA5CjTNPXCvlgI8vGVdRZPg2xUFd8EoRMEQLbYeEOjJOmllk6d7huxdhgAsAlCEGSkj62ok8/t0JGuYb17ZsDqcQAAAGCBw53DOsIpLEwDxegAss1VNUW6+apKRU3pB7tOWD0OANgCIQgyUpHPrTuWx17t98zu0xZPAwAAACs8H98C+eCiShX7OIWFqaMYHUA22nhjoyTpqV+dlj8QtnYYALABQhBkrN9ZEzuJ9dzeNo2FIhZPAwAAgHQyTVPPv9cmKbYlDFyJCs5hAchCH1pUrbkV+RoaC+vnB7usHgcAsh4hCDLWuvkVmlXi0+BYWP95oNPqcQAAAJBGhzuHdbTbzyksTEtl8hwWmyAAsofDYei2pbGvfa8c7rZ4GgDIfoQgyFgOh6HfXjNekA4AAIDckdgC4RQWpiNxDqvPH1Akalo8DQBM3i2LqyRJr7zfLdPk7y8AmA5CEGS03746FoK8crhbnYNjFk8DAACAdDBNUz+J94F8fCWnsHDlygtimyBRU+ofYRsEQPa4trFcPrdDnYMBHe4ctnocAMhqhCDIaI2VBbq2sUxRU/rxO61WjwMAAIA0ONQ5pGPdfnlcDq1fyiksXDm306Gy/NgmESexAGQTn9up6+dXSJJ+cZheEACYDkIQZLzfOeckFiugAAAA9vfCvg5J0gevqlIRp7AwTYly9F7K0QFkmQ9eFT+JdbjH4kkAILsRgiDjfWxFnXxuh450DevdMwNWjwMAAIAU23PyrCTp1iVVFk8CO0iUo3cTggDIMolekLeO92kkGLZ4GgDIXoQgyHhFPnfyDMJLLR0WTwMAAIBUMk1T+9tiL3xZWV9q7TCwhcQmCOewAGSb+ZUFqi/NUzAS1ZvH+qweBwCyFiEIskIiBNlxgDuYAAAAdnbm7Kj6R0JyOw0tqi20ehzYQBXnsABkKcMwktsgvzjcbfE0AJC9CEGQFW5ZVCWHIR3sGFJr/6jV4wAAACBF9rfGtkAW1RTJ63JaPA3soKIgdg6rhxAEQBYa7wUhBAGAK0UIgqxQVuDRmrllkqSfH2QbBAAAwK4Sp7BW1JdYPAnsorIosQnCOSwA2eeGhRVyOQwd6/HrdN+I1eMAQFYiBEHW+PCS2Emsnx/otHgSAAAApMq+1kFJ0nJCEMwQNkEAZLNin1tXz4m9KJSTWABwZQhBkDU+vKRakvTa0V6NBMMWTwMAAICZZpqmmuPnsAhBMFMSmyAUowPIVh9cVCmJk1gAcKUIQZA1FtUUqr40T8FwVK8f6bV6HAAAAMyw9oEx9fqDcjoMLaktsnoc2ESiGL1nOCDTNC2eBgCm7pZFsReFvn60V6FI1OJpACD7EIIgaxiGoY8sjX3h//khekEAAADsZl98C+Sq6kL53JSiY2ZUFMbOYQXCUfmDEYunAYCpWzarWBUFHg0Hwtpz8qzV4wBA1iEEQVZJnMT6+YEuXsUFAABgM4lTWJSiYyble1zK98RCtZ4hekEAZB+Hw9BNV8VOYr1+lMsYADBVhCDIKtfPr1Ce26mOwTG1tA9aPQ4AAABm0D76QJAiiW2QXj8hCIDsdM3cWDn6nlNsggDAVBGCIKv43M7kqx9+foCTWAAAAHayvy32IhdCEMy0yn
gvSPcQ5egAstMH5sRCkL2n+xWNchkDAKaCEARZ5yPxk1g7DhKCAAAA2EXn4Ji6hwJyGFJTXbHV48BmKgrGy9EBIBstqS1SvsepobGwjnQPWz0OAGQVQhBknVvjIci7Z/rVzU1fAAAAW9gfP4W1sLpQeR5K0TGzqori57CG2QQBkJ1cTodWzo5tSlKODgBTQwiCrFNT7NPy+mKZprTzENsgAAAAdkAfCFKJTRAAdnD1HHpBAOBKEIIgK926OLYN8sv3eyyeBAAAADMhsQmyfBYhCGZeJcXoAGxgPATpt3YQAMgyhCDISjctjJWjv360R6ZJIRgAAEC2298aK0VfMZsQBDOvsii+CUIxOoAs9oE5pZKkI13DGhgJWTsMAGQRQhBkpQ/MKVOe26me4aAOdQ5ZPQ4AAACmoXsooI7BMRmUoiNFkuew2AQBkMUqCr1qrMiXJL1zmpNYADBZhCDISh6XQ9fNK5ckvcpJLAAAgKy2vy12Cmt+ZYEKvC6Lp4EdJYrRe4YIQQBkN05iAcDUEYIga924sEKS9PrRXosnAQAAwHTsPxMLQVZQio4USWyCDI6FFQxHLZ4GAK5c4iTWO5SjA8CkEYIga90Y7wV581ivQhF+kAEAAMhWiU2Q5YQgSJGSPLdcDkMS5egAstsH4psge0/1KxqlIxUAJoMQBFlraW2xygs88gcjevd0v9XjAAAA4Ao1t8VK0ZfNIgRBajgchsoLEiexKEcHkL2W1BYp3+PUUCCs97uGrR4HALICIQiylsNhaN2C2EmsV4/QCwIAAJCNBkZDOnN2VBKl6EitykLK0QFkP5fToZWzYy8a4CQWAEwOIQiy2o0LYiexXiMEAQAAyEoH22NbILNKfCrJd1s8DeysopBydAD2MF6OTggCAJNBCIKsdlO8F+SdU/3yB8IWTwMAAICpOhAPQZpmsQWC1KqKb4L0+jmHBSC7jYcg/dYOAgBZghAEWW1ORb5ml+UpHDX11vE+q8cBAADAFB1oH5IkLeUUFlKssih+DotNEABZ7gNzSiVJR7qGNTASsnYYAMgChCDIeoltEE5iAQAAZJ8DHbFNEEIQpFpFvBidTRAA2a6i0KvGinxJ0junOYkFAJdDCIKsd0M8BKEcHQAAILuEI1Ed6mATBOmRLEYfZhMEQPbjJBYATB4hCLLeDQsqJEkHO4b4gQYAACCLnOj1KxCOKt/j1NzyfKvHgc0li9GH2QQBkP0SJ7HePd1v6RwAkA0IQZD1Kgu9WlJbJEl6/WivxdMAAABgslrifSBLaovkcBgWTwO7YxMEgJ0sry+RJO1vHZBpmhZPAwCZjRAEtpDsBXmfk1gAAADZoqWNPhCkTyIE6fMHFY3yhCGA7La0rlguh6Fef1BtA2NWjwMAGY0QBLZww8LYSaw3jrMJAgAAkC0OtBOCIH3K48Xokaip/tGQxdMAwPT43E5dVRO7irHvzIDF0wBAZiMEgS1c21guhyGd7B1RW/+o1eMAAABgEghBkE4el0MleW5JUi8nsQDYwIr62NfP/a2EIABwKYQgsIUin1sr4vcw32QbBAAAIOP1DgfUNRSQYSjZ7wakWoHHKUkaCUYsngQApi/xPMg+QhAAuCRCENjG9fNjJ7F2UY4OAACQ8Q7ES9EbKwpU4HVZPA1yhccV+xE4FIlaPAkATN+K2aWSYiEI5egAcHGEILCN6xfEe0GO9Vk8CQAAAC5n/BQWWyBIH7cz9iNwkBAEgA0sqS2Sy2Goj3J0ALgkQhDYxjVzy+R0GDrVN6JWekEAAAAyWksiBKmlDwTpkwxBwoQgALIf5egAMDmEILCNIp9byxO9IMc4iQUAAJDJKEWHFcbPYXE2BoA9UI4OAJdHCAJbuX5+uSTpDUIQAACAjBUIR3Ska1iStHQWIQjSx+OkEwSAvSR6Qd4jBAGAiyIEga2sS5SjE4IAAABkrCNdwwpHTZXkuTWrxGf1OMghiU0QzmEBsIsV8YsY+ylHB4CLIgSBrVzTWC6nw9DpvlGdOTti9TgAAACYwIH2IUmxUnTDMCyeBrnE7Yz9941idAB2QTk6AFweIQhspdDrSr4K4s1jfRZPAwAAgInQBwKruDmHBcBmKEcHgMsjBIHtrFsQO4lFLwgAAEBmIgSBVTiHBcCOVsZfDLqvtd/aQQAgQxGCwHaupxcEAAAgY5mmmQxBmghBkGYUowOwo+WzEyHIoMWTAEBmIgSB7Vwzt0xOh6EzZ0d1uo9eEAAAgEzSNRTQ2ZGQnA5DC6sLrR4HOWb8HBblwQDsg3J0ALg0QhDYToHXpZXxV0G8eZxeEAAAgEyS2AKZV1kgn9tp8TTINYlzWAHOYQGwEcrRAeDSCEFgS+vm0wsCAACQiQ51DEmSFtcWWTwJchHF6ADsyOd2alGyHL3f2mEAIAMRgsCWkr0gRwlBAAAAMkkiBFlKCAILuF2GJCnEJggAm1mRLEcfsHgSAMg8hCCwpTXxXpDW/lGdOUsvCAAAQKY4mNwEoRQd6eeNb4IE2QQBYDOUowPAxRGCwJbO6wU5Ri8IANjN448/rsbGRvl8Pq1du1ZvvfXWJT/+O9/5jhYvXqy8vDw1NDToi1/8osbGuJcMpFsoEtWRrmFJsfvlQLpxDguAXSU2QZopRweACxCCwLbWzoudxHrzOCexAMBOnn76aW3evFkPP/yw9uzZo1WrVmnDhg3q6uqa8OP/+Z//WV/5ylf08MMP68CBA/r+97+vp59+Wl/96lfTPDmAEz1+BSNRFXicqi/Ns3oc5CB3vBg9GOYJQgD2sqS2SE6HoV5/UB2DvNgHAM5FCALbWju/XJL0BpsgAGArjz32mO655x5t3LhRTU1N2rp1q/Lz87V9+/YJP/7111/XjTfeqM985jNqbGzU7bffrk9/+tOX3R4BMPMOnlOK7nAYFk+DXOThHBYAm/K5nVpYVShJauYkFgCchxAEtnXN3DI5DOlU34jaB0atHgcAMAOCwaB2796t9evXJ9/ncDi0fv167dq1a8LH3HDDDdq9e3cy9Dh27JheeOEFfexjH7vonxMIBDQ4OHjeG4DpO9gR+98SfSCwSmIThGJ0AHa0bFbs62tzG9+7AsC5CEFgW0U+t5bX0wsCAHbS09OjSCSimpqa895fU1Ojjo6OCR/zmc98Rt/4xjd00003ye12a8GCBfrQhz50yXNYjz76qEpKSpJvDQ0NM/p5ALnqUHwThD4QWMXjjG0g0QkCwI6akiHIgMWTAEBmIQSBra2dFzuJRS8IAOSunTt36pFHHtHf/d3fac+ePfrRj36k559/Xt/85jcv+pgtW7ZoYGAg+Xb69Ok0TgzY10FCEFjM4+IcFgD7SrwQlE0QADify+oBgFS6fn6Ftv3yOJsgAGATlZWVcjqd6uzsPO/9nZ2dqq2tnfAxDz74oD772c/qC1/4giRpxYoV8vv9+oM/+AN97Wtfk8Nx4WtCvF6vvF7vzH8CQA4bGgvpzNnYidIlnMOCRdyJThDOYQGwocQmSGv/qM76gyor8Fg8EQBkBjZBYGvXNJbLMKRjPX51DY5ZPQ4AYJo8Ho/Wr
FmjHTt2JN8XjUa1Y8cOrVu3bsLHjIyMXBB0OJ1OSZJpmqkbFsB5DnfGtkBqi30qyXdbPA1yVSIE4RwWADsq9rk1pzxfktTSzjYIACQQgsDWSvLcaqqLvRLijeNsgwCAHWzevFnbtm3Tk08+qQMHDujee++V3+/Xxo0bJUl33323tmzZkvz4O++8U9/97nf11FNP6fjx43rppZf04IMP6s4770yGIQBSL3EKazGnsGAhzmEBsLvl9fSCAMCv4xwWbG/tvAo1tw3qzWO9+sSqWVaPAwCYprvuukvd3d166KGH1NHRodWrV+vFF19MlqWfOnXqvM2PBx54QIZh6IEHHlBra6uqqqp055136s///M+t+hSAnHSwPd4HUkcIAut4EpsgYTYBAdjTslklemFfh/a3sgkCAAmEILC9tfPLtf2143qTTRAAsI1NmzZp06ZNE/7azp07z/tnl8ulhx9+WA8//HAaJgNwMYcoRUcG4BwWALtL9IKwCQIA4ziHBdu7rrFcknSka1jdQwGLpwEAAMg9pmnqYEfsFamLayhFh3US57ACFKMDsKnls0okxbpRR4Jhi6cBgMxACALbKyvwJF9x+BbbIAAAAGnXPjCmwbGwXA5DC6oLrB4HOcztNCSxCQLAvqqKvKou8so0pQOUowOAJEIQ5Ijr51dIkt483mvxJAAAALkncQprflWBvC6nxdMgl3k4hwUgByxLnsQiBAEAiRAEOWLtvNhJrDePsQkCAACQbgfjIcjiWk5hwVqJc1hBzmEBsLHl9bGTWM2UowOAJEIQ5Ijr4iHIoc4h9fmDFk8DAACQWxJ9IJSiw2rjxeimxZMAQOokN0HaKUcHAIkQBDmiotCrq6oLJUlvcRILAAAgrRLnsAhBYLVECBKMRGWaBCEA7GlZvBz9UMcQm28AIEIQ5JBEL8gbnMQCAABIm2A4qqPdw5KkxYQgsFjiHJbENggA+5pdlqdin0uhiKn3u4asHgcALEcIgpyxdn68F+Q4IQgAAEC6HOsZVihiqsjrUn1pntXjIMclitElytEB2JdhGMltEMrRAYAQBDkk0QtysGNQ/SP0ggAAAKTDgfZ4H0hdkQzDsHga5Dq3c/y/g4QgAOws0QvSQggCAIQgyB3VRT4tqCqQaUpvsQ0CAACQFgfbE30gxRZPAkgup0OOeA7CnXwAdrasPvZ1d38r5egAQAiCnLI23gvCSSwAAID0aIlvgiytIwRBZji3HB0A7Gp5/BxWS/ugolE6kADkNkIQ5JS18ZNYbxzrtXgSAACA3HAgvgmytI5SdGSGRC8IxegA7Gx+VaF8bodGghGd6PVbPQ4AWIoQBDnl+vgmSEv7oAZGQxZPAwAAYG/dQwH1DAdkGNLiWkIQZAaPK74JwjksADbmdBjJU5SUowPIdYQgyCk1xT7Nq4z1grx9gpNYAAAAqXSwI/akS2NFgfI9LounAWLcyU0QQhAA9pYoRycEAZDrCEGQcziJBQAAkB4Hkn0gbIEgc7hdsWZ0OkEA2N2yeC9Icxvl6AByGyEIcs71lKMDAACkRbIPpJZSdGSORCcI57AA2N25myCmSQ8SgNxFCIKcs3Z+bBNkf+uAhsboBQEAAEiV8U0QQhBkDs5hAcgVi2uL5HQY6vMH1TE4ZvU4AGAZQhDknLqSPM0pz1fUlN4+cdbqcQAAAGwpGI7qSNewJGkJ57CQQRLF6IQgAOzO53bqqupCSVJzK70gAHIXIQhyUrIX5Di9IAAAAKlwpGtY4aipYp9L9aV5Vo8DJHEOC0AuaaIcHQAIQZCbkr0gx+gFAQAASIXEKawldcUyDMPiaYBxiXNYwQj38QHYH+XoAEAIghyV6AXZ1zqg4UDY4mkAAADsJ9kHUsspLGQWd+IcFpsgAHLAMjZBAIAQBLlpdlm+ZpflKRI19fYJtkEAAABm2sGOIUmUoiPzJM9h0QkCIAckzmG19o/qrD9o8TQAYA1CEOSs5Ems44QgAAAAM8k0zfFNEEIQZBiPK3aejWJ0ALmg2OfWnPJ8SVJLO9sgAHITIQhyViIEeeMY5egAAAAzqXsooF5/UA5DWlTDOSxkFjfF6AByzPhJLHpBAOQmQhDkrLXzYr0g750ZkJ9eEAAAgBmTeKVpY2WB8jxOi6cBzsc5LAC5hl4QALmOEAQ5q6H8nF6Qk2etHgcAAMA26ANBJhsvRjctngQA0mNZfYkkQhAAuYsQBDmNk1gAAAAzL9EH0kQIggyU2AShEwRArkhsghztHtZIkEsYAHIPIQhyGiEIAADAzBsvRacPBJnH4+IcFoDcUl3kU1WRV6YpHWgfsnocAEg7QhDkNHpBAAAAZtZYKKKj3X5J0pJaNkGQedxOQxLF6AByS2IbpIVydAA5iBAEOY1eEAAAgJl1pGtYkaipkjy36kp8Vo8DXMDNOSzMgMcff1yNjY3y+Xxau3at3nrrrUt+/He+8x0tXrxYeXl5amho0Be/+EWNjY2laVqAcnQAuY0QBDmPk1gAAAAz59xTWIZhWDwNcKHkOSw2QXCFnn76aW3evFkPP/yw9uzZo1WrVmnDhg3q6uqa8OP/+Z//WV/5ylf08MMP68CBA/r+97+vp59+Wl/96lfTPDly2bJZlKMDyF2EIMh5hCAAAAAzpyUZgnAKC5mJYnRM12OPPaZ77rlHGzduVFNTk7Zu3ar8/Hxt3759wo9//fXXdeONN+ozn/mMGhsbdfvtt+vTn/70ZbdHgJmU2AQ51DHE338Acg4hCHIevSAAAAAzJ7EJ0kQIggw1fg7LtHgSZKNgMKjdu3dr/fr1yfc5HA6tX79eu3btmvAxN9xwg3bv3p0MPY4dO6YXXnhBH/vYx9IyMyBJc8rzVeR1KRiJ6v3OYavHAYC0uqIQhNuXsBN6QQAAAGaGaZpqaWMTBJktcQ4rwDksXIGenh5FIhHV1NSc9/6amhp1dHRM+JjPfOYz+sY3vqGbbrpJbrdbCxYs0Ic+9KFLnsMKBAIaHBw87w2YDsMw1JTsBaEcHUBumXIIwu1L2BEnsQAAAKavbWBMg2NhuRyGrqoptHocYEIUoyPddu7cqUceeUR/93d/pz179uhHP/qRnn/+eX3zm9+86GMeffRRlZSUJN8aGhrSODHsank9vSAActOUQxBuX8KOCEEAAACmL7EFsrC6UF6X0+JpgIm5nYYkQhBcmcrKSjmdTnV2dp73/s7OTtXW1k74mAcffFCf/exn9YUvfEErVqzQb/7mb+qRRx7Ro48+qmh04v8ebtmyRQMDA8m306dPz/jngtyzvD62CbK/lU0QALllSiFIum5fsvaJdKMXBAAAYProA0E28MbPYQU5h4Ur4PF4tGbNGu3YsSP5vmg0qh07dmjdunUTPmZkZEQOx/lPvzidsaDYNCfupvF6vSouLj7vDZiu5bNimyAt7YOKROlFApA7phSCpOv2JWufSDd6QQAAAKYvsQmSuDkOZCLOYWG6Nm/erG3btunJJ5/UgQMHdO+998rv92vjxo2SpLvvvltbtmxJfvydd96p7373u3rqqad0/PhxvfTSS3rwwQd15513
JsMQIB3mVxXK53ZoJBjR8R6/1eMAQNpcUTH6VFzJ7UvWPmGFxEmsXUc5iQUAAHAlDnRQio7MlyhGD0Z4FTSuzF133aW/+qu/0kMPPaTVq1dr7969evHFF5MvGD116pTa29uTH//AAw/oT/7kT/TAAw+oqalJn//857Vhwwb9r//1v6z6FJCjnA4jua1JOTqAXOKaygdP9/alJK1YsUJ+v19/8Ad/oK997WsXrIRKsbVPr9c7ldGAaVs3v0LP7D6jXfSCAAAATNnQWEgne0ckEYIgsyU2QYLhiMWTIJtt2rRJmzZtmvDXdu7ced4/u1wuPfzww3r44YfTMBlwacvrS7TnVL/2tw7ok6vrrR4HANJiSpsg6bp9CVhh3YLYJsi+M/0aHAtZPA0AAEB2OdQxJEmqLfapvMBj8TTAxY2fw+LnUQC5J9EL0txG/y6A3DHlc1jcvoRdzSrNU2NFvqKm9KvjfVaPAwAAkFVa2ukDQXZIFKPTCQIgFyW+Tu9vHeDFyQByxpTOYUmx25fd3d166KGH1NHRodWrV19w+/LczY8HHnhAhmHogQceUGtrq6qqqnTnnXfqz//8z2fuswBmyLoFFTrRO6JdR3v1kaU1Vo8DAACQNQ60J/pAiiyeBLi08XNYhCAAcs+imiK5nYYGx8I6c3ZUDeX5Vo8EACk35RBE4vYl7Ov6+RX632+dphcEAABgilriZzWa6kosngS4NLfTkCQF2QQBkIM8LocW1xZpf+ug9rcOEIIAyAlTPocF2Nm6+bFekJb2QfWPBC2eBgAAIDuEI1EdjHeCcA4Lmc7DOSwAOS7RC7K/bcDiSQAgPQhBgHNUF/u0sLpQpim9cYxeEAAAgMk40etXIBxVvsepubyiFBnOwzksADluWX08BGmlHB1AbiAEAX5NYhvkDU5iAQAATEpz/BTWktoiORyGxdMAl5boBImaUiRKKTCA3LOccnQAOYYQBPg16xbEQpBdRwlBAAAAJuNAe+wU1tI6TmEh8yXOYUmcxAKQm5bWFcvpMNTrD6pzMGD1OACQcoQgwK+5Pr4JcqhzSD3DfDMAAABwOS3t8VJ0+kCQBRKbIJIU4CQWgBzkczu1sKpQUmwbBADsjhAE+DXlBR4tqS2SxEksAACAyTgQD0HYBEE2cDvHT7axCQIgVy1LnMSiHB1ADiAEASbASSwAAIDJ6R4KqHsoIMNQ8oUkQCYzDCNZjk4IAiBXJcrRE71eAGBnhCDABBLl6LvYBAEAALikxBbIvIoC5XtcFk8DTE5iGyTIOSwAOSpRjt7MOSwAOYAQBJjA2vkVchjSsW6/OgfHrB4HAAAgYyX6QJbSB4Is4naxCQIgtyV6vNoGxtRLHyoAmyMEASZQkufWslmx1VB6QQAAAC4usQnSRB8IskjiHFYwbFo8CQBYo8jn1rzKAknSfk5iAbA5QhDgIhK9IK8fIQQBAAC4mMQt8SY2QZBF3IkQhE0QADksWY7OSSwANkcIAlxEIgR57WiPxZMAAABkptFgRMe6hyVJy9gEQRbxcA4LALQiXo5OCALA7ghBgIu4rrFcLoehM2dHdap3xOpxAAAAMs6hziFFTamy0KOqIq/V4wCTljiHFaIYHUAOS4Qg+whBANgcIQhwEQVelz4wp1QS2yAAAAATaYmfwlpaVyzDMCyeBpg8tyv239cAmyAActiyeAhy5uyozvqDFk8DAKlDCAJcwg0LKiVJrx0hBAEAAPh1Le2xV47SB4Js42YTBABUkufW3Ip8SWyDALA3QhDgEm5cGAtBdh3tVTRqWjwNAABAZklsgiybVWLxJMDUJM9hRfgeH0BuW85JLAA5gBAEuITVDaXKczvV6w/qUOeQ1eMAAABkjEjU1MGO2PdHTZSiI8skitGDkYjFkwCAtVZSjg4gBxCCAJfgcTl03bxySZzEAgAAONfJXr9GghH53A7NqyywehxgSsbPYbEJAiC3UY4OIBcQggCXccOCCknS60d7LZ4EAAAgc7S0x05hLaktltNBKTqyS+IcFsXoAHId5egAcgEhCHAZiV6QN4/1KsQPSQAAAJKk5ngfCKXoyEZuF8XoACCdX46+v41tEAD2RAgCXEZTXbFK893yByN670y/1eMAAABkhEQpOn0gyEZuZ2x7iRc5AQDl6ADsjxAEuAyHw9C6+bGTWK8d4SQWAACANH4Oi00QZCNvohidTRAASPaCUI4OwK4IQYBJuCF+EotydAAAAKlraEzdQwEZhrSktsjqcYApSxajswkCAMkQ5L0zhCAA7IkQBJiEG+Pl6O+c6tdoMGLxNAAAANY60D4kSZpfWaB8j8viaYCpS4QgwYhp8SQAYL3lsyhHB2BvhCDAJMyrLFBdiU/BSFS/OtFn9TgAAACWSvaBxJ80AbKNh3NYAJBUkk85OgB7IwQBJsEwDN2wIH4S6ygnsQAAQG5L9oFQio4sxTksADgf5egA7IwQBJikGxfGTmK9Tjk6AADIcS3xV4lSio5s5XEakghBACCBcnQAdkYIAkzSjfFy9P1tA9zIBAAAOWskGNaxHr8kNkGQvTiHBQDnW8EmCAAbIwQBJqmm2KdFNYUyTU5iAQCA3HWwY0imKVUVeVVV5LV6HOCKjBejE4IAgDRejn66b1T9I7zwE4C9EIIAU3DTwipJ0qvvE4IAAIDclCxFZwsEWYxOEAA4X0m+W3PK4+XorYMWTwMAM4sQBJiCm6+KncT65fs9Mk3T4mkAAADSL1mKTh8IshjnsADgQitmx7ZB3mvtt3YQAJhhhCDAFKydXy6301Br/6hO9I5YPQ4AAEDasQkCO/AkN0F4YRMAJCR7Qc7QCwLAXghBgCnI97h09ZwySdKr73dbPA0AAEB6RaKmDnbEQpBlbIIgi9EJAgAXWpnYBCEEAWAzhCDAFJ17EgsAACCXHO/xaywUVb7HqbkVBVaPA1wxzmEBwIVW1JfIMKTW/lH1DAesHgcAZgwhCDBFN10VK0ffdbRXYV45BgAAckiiD2RJbZGcDsPiaYAr53bG/vtLMToAjCvyuTW/MvYiB05iAbATQhBgilbUl6gkz62hQFjv8k0BAADIIc1tse99KEVHthvvBCEEAYBzrZxdKkl690y/pXMAwEwiBAGmyOkwdMOCCknSq5zEAgAAOWS8FL3E4kmA6eEcFgBMjF4QAHZECAJcgZvivSCvHqEcHQAA5AbTNMdDEDZBkOXcyU0Q0+JJACCzJDZB3jvTL9Pk70gA9kAIAlyBmxfGekHeOdWv4UDY4mkAAABSr3sooF5/UA5DWlxTZPU4wLQkQpAg57AA4DzLZhXL5TDUMxxU28CY1eMAwIwgBAGuwJyKfM0pz1c4auqNo71WjwMAAJByzfFS9AVVhcrzOC2eBpgezmEBwMR8bqcWxV/ssI9eEAA2QQgCXKHxk1j0ggAAAPvjFBbshGJ0ALi4VQ2xXpB36QUBYBOEIMAVunlhLAT55fv0ggAAAPtraU+UohOCIPu5XYYkQhAAmMiK+lJJsV4QALADQhDgCt2woFIOQzra7Vdr/6jV4wAAAKQUmyCwE885xejRKMW
/AHCulbNjmyDvnRng70gAtkAIAlyhkny3VjeUSpJeOcw2CAAAsK/hQFgnev2SpKVsgsAG3K7xH4VDUbZBAOBci2uL5HU5NDQ2/vUfALIZIQgwDR9cVCWJEAQAANjboY5BmaZUU+xVZaHX6nGAaUtsgkixbRAAwDi305Hc/NzXSi8IgOxHCAJMwy3xEOTVIz0Kc08YAADYVPIUFlsgsAn3OSFIMMz38QDw61bNLpUkvXuaEARA9iMEAaZh5exSlea7NTQW1t7T/VaPAwAAkBLJUnT6QGATTochp4NydAC4mPFekH5rBwGAGUAIAkyD02HoxoWVkjiJBQAA7CuxCbJsVonFkwAzx+2MhSBsggDAhRIhyP62AS5fAMh6hCDANCVOYv2CEAQAANhQOBLVwY4hSZzDgr0kekGCPLkHABeYX1moQq9LY6Go3u8atnocAJgWQhBgmj54VSwEea91QH3+oMXTAAAAzKzjPX4FwlEVeJyaU55v9TjAjPG4Yj8Ocw4LAC7kcBhaXh978QMnsQBkO0IQYJpqS3xaUlsk04wVpAMAANhJc/wU1tK6YjniHQqAHSQ2QUJh0+JJACAzJcrR3ztDOTqA7EYIAsyADyZOYh3iJBYAALAXStFhV25X4hxWxOJJACAzrYyHIO+yCQIgyxGCADMg0QvyyvvdMk1eSQYAAOwjUYpOHwjsxp3oBGETBAAmtKohVo5+sH1IYyECYwDZixAEmAHXNJYpz+1U91BAB9qHrB4HAABgRpimmdwEWUoIAptJnsOiEwQAJlRfmqfKQq/CUVP7WzmJBSB7EYIAM8DrcmrdggpJsW0QAAAAO+gYHFOfPyinw9Di2iKrxwFmVPIcVpgQBAAmYhiGVjeUSpL2nu63dBYAmA5CEGCGfPCqSknSK4cJQQAAgD0kTmEtrCqUz+20eBpgZnmchiQ2QQDgUj4wp1SS9A4hCIAsRggCzJBEOfqvTvTJHwhbPA0AAMD0JftAKEWHDXmSxeiEIABwMclNkFP9ls4BANNBCALMkHmVBWooz1MoYmrX0V6rxwEAW3v88cfV2Ngon8+ntWvX6q233rrkx/f39+u+++5TXV2dvF6vFi1apBdeeCFN0wLZqzkegiwjBIENjRejE4IAwMWsnF0iw5Ba+0fVPRSwehwAuCKEIMAMMQxDty6uliS9fKjL4mkAwL6efvppbd68WQ8//LD27NmjVatWacOGDerqmvjv3mAwqNtuu00nTpzQM888o0OHDmnbtm2qr69P8+RA9kmUojdRig4bcieL0U2LJwGAzFXkc2thVaEkekEAZC9CEGAGJUKQnYe6ZZr8MAUAqfDYY4/pnnvu0caNG9XU1KStW7cqPz9f27dvn/Djt2/frr6+Pj377LO68cYb1djYqFtuuUWrVq1K8+RAdhkcC+lU34gkzmHBnhLnsOgEAYBLGy9HP2vtIABwhQhBgBl0/fwKeV0OtfaP6v2uYavHAQDbCQaD2r17t9avX598n8Ph0Pr167Vr164JH/Pcc89p3bp1uu+++1RTU6Ply5frkUceUSQSueifEwgENDg4eN4bkGsOxE9h1ZfmqTTfY/E0wMzzcA4LACZldbwcnU0QANmKEASYQXkep9YtqJAkvXyQk1gAMNN6enoUiURUU1Nz3vtramrU0dEx4WOOHTumZ555RpFIRC+88IIefPBBffvb39af/dmfXfTPefTRR1VSUpJ8a2homNHPA8gGiVNYSzmFBZtyOw1JFKMDwOUkNkHeOz2gaJSrFwCyDyEIMMM+tKhKEr0gAJApotGoqqur9b3vfU9r1qzRXXfdpa997WvaunXrRR+zZcsWDQwMJN9Onz6dxomBzNAS3wThFBbsinNYADA5i2uKlOd2aigQ1tFurl4AyD6EIMAM+1C8F+TtE2c1OBayeBoAsJfKyko5nU51dnae9/7Ozk7V1tZO+Ji6ujotWrRITqcz+b6lS5eqo6NDwWBwwsd4vV4VFxef9wbkmuZ4CLKMEAQ25eYcFgBMisvp0Ir6EknSO5zEApCFCEGAGdZYWaD5lQUKR0299n6P1eMAgK14PB6tWbNGO3bsSL4vGo1qx44dWrdu3YSPufHGG3XkyBFFo+NPch0+fFh1dXXyeOg5ACYSDEf1fteQJKmJc1iwqUQnCJsgAHB59IIAyGaEIEAKJLZBdh7qtngSALCfzZs3a9u2bXryySd14MAB3XvvvfL7/dq4caMk6e6779aWLVuSH3/vvfeqr69P999/vw4fPqznn39ejzzyiO677z6rPgUg473fNaRQxFSxz6XZZXlWjwOkxPg5LO7bA8DlJHpB3iUEAZCFXFYPANjRrUuqtP2143r5UJdM05RhGFaPBAC2cdddd6m7u1sPPfSQOjo6tHr1ar344ovJsvRTp07J4Rh/nUdDQ4N++tOf6otf/KJWrlyp+vp63X///fryl79s1acAZLxz+0D4PgZ2lTiHFeAcFgBcViIEOdgxpNFgRHke56UfAAAZhBAESIHr5pUrz+1U11BALe2DWjarxOqRAMBWNm3apE2bNk34azt37rzgfevWrdMbb7yR4qkA+2hpj4cgdXwPA/tycw4LACatrsSn6iKvuoYC2t82oGsby60eCQAmjXNYQAp4XU7duLBSEiexAABA9qEUHblg/BwWIQgAXI5hGMltkL2n+i2dBQCmihAESJFbl1RJkl4+2GXxJAAAAJNnmqYOnHMOC7ArjzN26i3IOSwAmBTK0QFkK0IQIEUS5eh7Tp1V/0jQ4mkAAAAm58zZUQ0FwvI4HVpYXWj1OEDKcA4LAKYmsQnyzqmz1g4CAFNECAKkSH1pnhbVFCpqSr84zEksAACQHZrbBiRJi2oLk08SA3aUOIcVjJgWTwIA2WHV7FI5DKltYEwdA2NWjwMAk8ZPNUAK3boktg3CSSwAAJAtWhKnsOo4hQV7S4R8wXDE4kkAIDsUeF1aUhv7/mAP2yAAsgghCJBC65fWSJJ2Hu5WmDV7AACQBZoJQZAjxs9hsQkCAJN19dxSSdKek4QgALIHIQiQQh9oKFVpvlv9IyHtOdVv9TgAAACX1dIeC0GW1ZdYPAmQWl4XnSAAMFVr5pZJknazCQIgixCCACnkcjr0oUVVkqQdBzstngYAAODS+vxBtcdvfC+pLbJ4GiC1xs9hEYIAwGRdPScWgjS3DmosxDlBANmBEARIsQ/HT2L9/AC9IAAAILMl+kAaK/JV5HNbPA2QWm6nIUkKsgkCAJM2pzxflYUeBSNRNbcNWD0OAEwKIQiQYrdcVSWnw9D7XcM61Tti9TgAAAAXlXgyo2kWfSCwPw/nsABgygzD0Afi2yB7TvZbOwwATBIhCJBiJfluXRO/mflzTmIBAIAMlihFXzaLPhDYH+ewAODKJHtBKEcHkCUIQYA0WB8/ibXjICexAABA5mITBLlkfBPEtHgSAMguiV6QPafOyjT5OxRA5iMEAdLgw0urJUlvHuvTcCBs8TQAAAAXGgmGdazHL0laRgiCHOCJb4KE2AQBgClZObtELoehrqGAWvtHrR4HAC6LEA
RIg/mVBWqsyFcwEtWr73dbPQ4AAMAFDnYMyTSlykKvqot8Vo8DpJw7vgkSoBMEAKbE53YmXzDBSSwA2YAQBEgDwzD04SXxk1gHOIkFAAAyz3gfCFsgyA1upyEpVozOORcAmJqr470g75zqt3YQAJgEQhAgTT4SP4n18qEuRaP8kAUAADJLS7wPhBAEucLrdEqSTFOK8P05AExJoheETRAA2YAQBEiTaxvLVeR1qWc4qPdaB6weBwAA4DwtyU2QEosnAdLD7TKS/znISSwAmJI18U2QlvZBjQTpPgWQ2QhBgDTxuBz64KIqSdKOA50WTwMAADAuHInqYMeQJKmJTRDkCLdz/MfhUJhNEACYilmleaot9ikSNfXeGV7oCSCzEYIAafThJbGTWC+1EIIAAIDMcbTbr0A4qkKvS3PL860eB0gLl8OQEV8GYRMEAKYusQ2y5xQnsQBkNkIQII0+vKRaDkM62DGk030jVo8DAAAgSWqO94EsrSuSw2Fc5qMBezAMI7kNQggCAFOXKEffQy8IgAxHCAKkUVmBR9c0lkviJBYAAMgc9IEgV3niIUgoTAgCAFN19ZxSSdKeU/0yTc4KAshchCBAmt22tEaS9J8HuiyeBAAAIKY5HoI01dEHgtziccVDEDZBAGDKls0qkcflUJ8/qOM9fqvHAYCLIgQB0mx9UywEeeNYrwZGQxZPAwAAcp1pmslzWJSiI9e4nbHzbwE2QQBgyjwuh1bPLpUkvX2Ck1gAMhchCJBm8yoLtLC6UOGoqV8c7rZ6HAAAkOPOnB3V4FhYbqehRTVFVo8DpFWiE4RNEAC4MtfOi/WC/OpEn8WTAMDFEYIAFlifOInVQi8IAACwVuIU1lXVRcnTQECuGD+HxS17ALgSid7TtylHB5DB+CkHsMBtTdWSpJcPdfGqMwAAYKmW9kQpOqewkHsSxehBzmEBwBW5ek6ZDEM63uNX19CY1eMAwIQIQQALrG4oU0WBR0NjYb11nJVRAABgnRb6QJDDOIcFANNTkufW4vg5zd30ggDIUIQggAWcDkMfXhLbBnmJk1gAAMBCiXNYy2aVWDwJkH6Jc1hBQhAAuGLXzYudxPoVIQiADEUIAljktqZ4L8iBTpkmN4gBAED69fmDah+Ina5YWkcpOnKP22lI4hwWAExHoheEcnQAmYoQBLDITVdVyuty6MzZUR3qHLJ6HAAAkIOa46ew5lbkq8jntngaIP3cdIIAwLRd21gmKfZ9xXAgbPE0AHAhQhDAIvkel25aWClJeqmZk1gAACD9xk9h0QeC3JQoRg9HCUEA4ErVleRpdlmeoqa091S/1eMAwAUIQQALrY+fxHrpACEIAABIP/pAkOucjtg5rHCU87QAMB3Xxk9ivcVJLAAZiBAEsNBHllbLMKT3zgyofWDU6nEAAECOSZzDYhMEuSpxDiscIQQBgOm4Jn4S621CEAAZiBAEsFB1kU9Xz4l9o/CfLWyDAACA9PEHwjre45fEJghyF5sgADAzrotvgrxzql+hCCcGAWQWQhDAYrfHT2L9jBAEAACk0cGOQZmmVF3kVVWR1+pxAEu4nPEQhCfsMEWPP/64Ghsb5fP5tHbtWr311luX/Pj+/n7dd999qqurk9fr1aJFi/TCCy+kaVog9RZUFao0363RUCR5bhMAMgUhCGCx25fVSpJ2He3VwGjI4mkAAECuoBQdkNyORDE6myCYvKefflqbN2/Www8/rD179mjVqlXasGGDurq6Jvz4YDCo2267TSdOnNAzzzyjQ4cOadu2baqvr0/z5EDqOByGrpnLSSwAmYkQBLDYvMoCXVVdqHDU1M5DE3/TDAAAMNOaWylFB5zJTRBCEEzeY489pnvuuUcbN25UU1OTtm7dqvz8fG3fvn3Cj9++fbv6+vr07LPP6sYbb1RjY6NuueUWrVq1Ks2TA6mVKEf/FSEIgAxDCAJkgNuXxU9iNXMSCwAApEdzO6XogDvZCcI5LExOMBjU7t27tX79+uT7HA6H1q9fr127dk34mOeee07r1q3Tfffdp5qaGi1fvlyPPPKIIpHIRf+cQCCgwcHB896ATHdNPAR5+8RZmSbhMoDMQQgCZIDbm2InsXYe6tJY6OLfCAMAAMyEUCSqwx3DktgEQW5zcg4LU9TT06NIJKKamprz3l9TU6OOjo4JH3Ps2DE988wzikQieuGFF/Tggw/q29/+tv7sz/7son/Oo48+qpKSkuRbQ0PDjH4eQCosry+W1+VQrz+oYz1+q8cBgCRCECADrKgvUW2xT/5gRLuO9lo9DgAAsLn3O4cVjERV5HOpoTzP6nEAy7gpRkcaRKNRVVdX63vf+57WrFmju+66S1/72te0devWiz5my5YtGhgYSL6dPn06jRMDV8brcmp1Q6kk6a3jnMQCkDkIQYAM4HAYuq0pfhKrZeJXDwEAAMyU5rbYKaymumIZhmHxNIB1XIkQhE0QTFJlZaWcTqc6O88/ZdzZ2ana2toJH1NXV6dFixbJ6XQm37d06VJ1dHQoGAxO+Biv16vi4uLz3oBssHZe7CTWm8d4gSeAzHFFIcjjjz+uxsZG+Xw+rV27Vm+99dYlP76/v1/33Xef6urq5PV6tWjRIr3wwgtXNDBgV4lekJdaOhXhhzAAAJBCzW2UogPSOeewKEbHJHk8Hq1Zs0Y7duxIvi8ajWrHjh1at27dhI+58cYbdeTIEUXP6Z45fPiw6urq5PF4Uj4zkE7Xz6+QJL15vI9eEAAZY8ohyNNPP63Nmzfr4Ycf1p49e7Rq1Spt2LBBXV1dE358MBjUbbfdphMnTuiZZ57RoUOHtG3bNtXX1097eMBO1s6rUJHPpZ7hoPaePmv1OAAAwMZakiEIryxGbqMYHVdi8+bN2rZtm5588kkdOHBA9957r/x+vzZu3ChJuvvuu7Vly5bkx997773q6+vT/fffr8OHD+v555/XI488ovvuu8+qTwFImQ/MKZPbaah9YEyn+kasHgcAJEmuqT7gscce0z333JP84r5161Y9//zz2r59u77yla9c8PHbt29XX1+fXn/9dbndbklSY2Pj9KYGbMjjcujDS6r1b3vb9LPmTq2ZW271SAAAwIaiUVMt7fEQpJ4QBLnNmewE4dXKmLy77rpL3d3deuihh9TR0aHVq1frxRdfTJalnzp1Sg7H+GtOGxoa9NOf/lRf/OIXtXLlStXX1+v+++/Xl7/8Zas+BSBl8jxOrZpdqrdPntWbx/o0t6LA6pEAYGqbIMFgULt379b69evHfwOHQ+vXr9euXbsmfMxzzz2ndevW6b777lNNTY2WL1+uRx55RJFI5KJ/TiAQ0ODg4HlvQC64vSl2Q/ZnLZ2sjQIAgJQ41Tei4UBYXpdDC6sKrR4HsJQ7cQ6Lc7SYok2bNunkyZMKBAJ68803tXbt2uSv7dy5U0888cR5H79u3Tq98cYbGhsb09GjR/XVr371vI4QwE4SJ7HeOE4vCIDMMKUQpKenR5FIJPnqhoSamhp1dExc5nzs2DE988wzikQieuGFF/Tggw/q29/+tv7sz/7son/Oo
48+qpKSkuRbQ0PDVMYEstYti6vkcTl0vMev97uGrR4HAADYUKIPZEltkVzOK6oIBGzD6aAYHQBm2tr5iXL0PosnAYCYlP/UE41GVV1dre9973tas2aN7rrrLn3ta1/T1q1bL/qYLVu2aGBgIPl2+vTpVI8JZIRCr0s3L6yUJL24f+JgEQAAYDqa2wYkSU2UogNyJ89h0QkCADNlzdwyuRyGWvtHdZpeEAAZYEohSGVlpZxOpzo7O897f2dnp2prayd8TF1dnRYtWnTemufSpUvV0dGhYDA44WO8Xq+Ki4vPewNyxYblsf8tEYIAAIBUaKYUHUhKbEOxCQIAMyff49KK2bEXW7x5nG0QANabUgji8Xi0Zs0a7dixI/m+aDSqHTt2aN26dRM+5sYbb9SRI0cUjY6/subw4cOqq6uTx+O5wrEB+1q/tEZOh6GW9kFeMQEAAGYcIQgwLnkOi00QAJhRyV6QY/SCALDelM9hbd68Wdu2bdOTTz6pAwcO6N5775Xf79fGjRslSXfffbe2bNmS/Ph7771XfX19uv/++3X48GE9//zzeuSRR3TffffN3GcB2Eh5gUfXNcbuZ/60mW0QAAAwc7oGx9QzHJDDkJbUEoIAyXNYbIIAwIxaOy/eC0I5OoAM4JrqA+666y51d3froYceUkdHh1avXq0XX3wxWZZ+6tQpORzj2UpDQ4N++tOf6otf/KJWrlyp+vp63X///fryl788c58FYDMfXV6rXcd69eL+Dn3h5vlWjwMAAGwisQWyoKpQeR7nZT4asD9n/GfXcIQQBABm0jWN5XI6DJ3uG1Vr/6jqS/OsHglADptyCCJJmzZt0qZNmyb8tZ07d17wvnXr1umNN964kj8KyEm3L6vRw881a/eps+oaGlN1kc/qkQAAgA0kStE5hQXEuBPnsKKcwwKAmVTodWl5fYnePd2vN4/16reunm31SABy2JTPYQFIvbqSPK1uKJVpSi+1dFo9DgAAsInxPpASiycBMgPF6ACQOtcnTmIdoxwdgLUIQYAMtWFZrSTpxf30ggAAgJmxn00Q4DyuZDE6IQgAzLS18+kFAZAZCEGADLVhWaxnZ9fRXg2MhCyeBgAAZLuB0ZBO941KYhMESHDFi9FDEc5hAcBMu6axXA5DOtE7oo6BMavHAZDDCEGADDW/qlCLa4oUjpr6+SFOYgEAgOlpiZ/Cml2Wp5J8t8XTAJnBGd8EiXAOCwBmXLHPnXzhBdsgAKxECAJksMQ2CCexAADAdFGKDlzITScIAKTU9fGTWLuOEoIAsA4hCJDBNiyP9YL84nC3RoJhi6cBAADZjFJ04ELJTpAo57AAIBVuWFApSXqdEASAhQhBgAzWVFeshvI8jYWi+sWhbqvHAQAAWYxNEOBCLkd8E4RidABIiWvnlcvpMHSqb0Sn+0asHgdAjiIEATKYYRj62PI6SdILnMQCAABXaCwU0dFuvyRpeT2bIEDCeDE6IQgApEKh16VVs2Pfe+w6xjYIAGsQggAZ7o4VsRDk5wc6NRaKWDwNAADIRgc7hhSJmqos9Ki6yGv1OEDGcCWL0TmHBQCpcuPC+EmsIz0WTwIgVxGCABlu1ewSzSrxyR+M6JXDnMQCAABTlziF1TSrRIZhWDwNkDlcTs5hAUCqrVtQISnWC2Ka/H0LIP0IQYAMZxiGPho/ifUiJ7EAAMAV2N+aKEWnDwQ413gxOk/KAUCqXD2nTB6XQ11DgeR5TgBIJ0IQIAt8bEWtJOmlA50KhDmJBQAApqaFUnRgQolOkDDnsAAgZXxup66ZWyZJ2nWUk1gA0o8QBMgCV88pU3WRV0NjYb1+hCIxAAAweeFIVAc7hiRJy2dRig6cy+WIn8NiEwQAUirZC3KU5zQApB8hCJAFHA5DdyyPbYO8sK/d4mkAAEA2OdrtVyAcVaHXpTnl+VaPA2SUxDks05QiBCEAkDKJXpBdx3oV5e9bAGlGCAJkiTtWxHpBftbSqVCEdX0AADA5+1vjpeh1xXI4KEUHzpU4hyWJ77EBIIVW1peo0OtS/0hILe2DVo8DIMcQggBZ4trGclUWejQwGtIu1kcBAMAkNbfFnmhoog8EuEDiHJbEJggApJLL6dB188oliec0AKQdIQiQJZwOQ7cvi53E+o/9nMQCAACT00wpOnBR526ChCOEIACQSjfET2K9Tjk6gDQjBAGyyMeWx09iNXcqzLo+AAC4jGjUVEt8E2QZpejABVznnIgLR/n+GgBS6YYFsXL0t473cYIQQFoRggBZZO38cpXlu9XrD+qt431WjwMAADLc6bMjGgqE5XE6dFVNodXjABnHMAw540FImHNYAJBSS2qLVF7gkT8Y0Xtn+q0eB0AOIQQBsojb6dCG+Emsn+zjJBYAALi0RB/I4toiuZ186w9MJLENwquSASC1HA5D6+bHT2IdoRcEQPrwkxCQZT6+MnYS68X9HZzEAgAAl0QfCHB5iRCEYnQASL118V6QV4/QCwIgfQhBgCyzbn6Fygs86vMH9cYxTmIBAICLa072gRCCABfjim9JhShGB4CUu2lhrBdkz6mz8gfCFk8DIFcQggBZxuV06KPL4yex3muzeBoAAJDJEiFIE6XowEWxCQIA6TO3Il+zy/IUiph0nQJIG0IQIAv9xor4SazmDm4XAwCACXUNjql7KCDDkJbWFVk9DpCxXE46QQAgXQzD0M1XxbZBfvk+J7EApAchCJCF1s6vUGWhR/0jIb1+lDIxAABwocQWyPzKAuV7XBZPA2QulyP2YzGbIACQHjctrJIkvXqk2+JJAOQKQhAgCzkdhu5YHtsG+cm7nMQCAAAXSpSiL6/nFBZwKYlNkHCUTRAASIcbFlTIMKTDncPqHByzehwAOYAQBMhSv7EyFoL8tLlDwTA/sAEAgPNRig5MTqIThGJ0AEiPsgKPVsRfpPEqJ7EApAEhCJClrmksV3WRV4NjYVZIAQDABcZDEDZBgEvhHBYApN9NC2O9IK8eIQQBkHqEIECWcjoMfSxekP6T99otngYAAGSSwbGQTvWNSGITBLgcitEBIP1uumo8BDFNQmgAqUUIAmSxxEmsl5o7FQhHLJ4GAABkipb4Fkh9aZ5K8z0WTwNkNpeTTRAASLc1c8uU53aqeyigQ51DVo8DwOYIQYAsdvWcMtUW+zQUCOuVw6yQAgCAmMQprCa2QIDLohMEANLP63LqunnlkugFAZB6hCBAFnM4DH18ZeIkVpvF0wAAgEzR3DYgiVNYwGQkQpBwlHNYAJBON8dPYv2SEARAihGCAFnuzlWzJEkvtXRqNMhJLAAAMH4Oi1J04PISnSCcwwKA9Er0grx5vJcT3wBSihAEyHKrZpeooTxPI8GIfn6wy+pxAACAxcZCEb3fNSyJTRBgMlyO2I/FnMMCgPRaXFOkqiKvxkJR7T551upxANgYIQiQ5QzD0J0rY9sgz73bavE0AADAaoc7hxSJmirLd6uuxGf1OEDGcyc3QTiHBQDpZBiGbloY2wahFwRAKhGCADaQOIn18qFuDY6FLJ4GAABYaX/r+CkswzAsngbI
fE6K0QHAMskQ5AghCIDUIQQBbGBJbZEWVhcqGI7qpeZOq8cBAAAWohQdmBqXM/ZjcTjCJggApFuiHH1f64D6/EGLpwFgV4QggA2cexLr399rs3gaAABgpeZEKXo9pejAZLjimyBhitEBIO2qi31aUlsk05R++X631eMAsClCEMAmfmNVnaTYHU1ePQEAQG6KRE0d7Eicw2ITBJiMRDE6IQgAWOOWxVWSpFcOcxILQGoQggA2saCqUMtmFSscNfUf+9utHgcAAFjgWPewxkJR5XucmldRYPU4QFZIbIJECEEAwBK3XBUPQd7vlmnydzGAmUcIAtjIJ+IF6f/+LiexAADIRYlTWEvriuVwUIoOTIbLmShGpxMEAKywprFM+R6nuocCOtA+ZPU4AGyIEASwkY+vjJ3EevN4nzoHxyyeBgAApBul6MDUuePF6GyCAIA1vC6n1s2vkCT94jC9IABmHiEIYCOzy/K1Zm6ZTFN6/j1OYgEAkGuSpeiEIMCkOR2JTRBCEACwyngvCCEIgJlHCALYzJ3xbZDnOIkFAEBOMU3znBCkxOJpgOyROIcV5hwWAFjmg/FekLdP9skfCFs8DQC7IQQBbOZjK+vkMKS9p/t1um/E6nEAAECatPaPamA0JJfD0FU1hVaPA2SNRDF6mHNYAGCZxsoCza3IVyhiatfRXqvHAWAzhCCAzVQX+XR9/JbmTziJBQBAzkhsgVxVUySvy2nxNED2cDliPxaHo2yCAICVEtsg9IIAmGmEIIAN/cbKWZKkf+ckFgAAOaMlHoI01dEHAkyFO34Oi2J0ALDWLYvivSDvE4IAmFmEIIANfXR5rVwOQy3tgzraPWz1OAAAIA0oRQeujDO+CUIxOgBYa92CCrmdhk72juhEj9/qcQDYCCEIYEPlBR7ddFWlJOkn73ISCwCAXNDSNiCJEASYKjfF6ACQEQq8Ll0zt1wS2yAAZhYhCGBTd8ZPYj33bqtMk1e1AbCXxx9/XI2NjfL5fFq7dq3eeuutST3uqaeekmEY+tSnPpXaAYE0O+sPqm1gTJLURAgCTImTYnQAyBi3LI73ghwiBAEwcwhBAJu6bVmNPC6Hjnb7dbBjyOpxAGDGPP3009q8ebMefvhh7dmzR6tWrdKGDRvU1dV1ycedOHFCX/rSl3TzzTenaVIgfVraY6ew5lbkq8jntngaILu4nPFidM5hAYDlEuXou471KhCOWDwNALsgBAFsqtjn1q3xV1BQkA7ATh577DHdc8892rhxo5qamrR161bl5+dr+/btF31MJBLR7/3e7+nrX/+65s+fn8ZpgfRojp/CohQdmDoXmyAAkDGW1hWpusirkWBEvzp+1upxANgEIQhgY78RP4n1k/faOYkFwBaCwaB2796t9evXJ9/ncDi0fv167dq166KP+8Y3vqHq6mp9/vOfn9SfEwgENDg4eN4bkMkoRQeu3HgIQicIAFjNMAzdurhakvTyoUtvegPAZBGCADb2kaXVynM7dapvRO+dGbB6HACYtp6eHkUiEdXU1Jz3/pqaGnV0dEz4mFdffVXf//73tW3btkn/OY8++qhKSkqSbw0NDdOaG0i18RCkxOJJgOzjjp/DirAJAgAZ4dYlsasWLx8kBAEwMwhBABvL97i0vin2RCEnsQDkoqGhIX32s5/Vtm3bVFlZOenHbdmyRQMDA8m306dPp3BKYHpGgxEd6x6WxCYIcCUSxeihCJsgAJAJblxYKZfD0LEev070+K0eB4ANEIIANnfnyjpJ0r+/18ar2wBkvcrKSjmdTnV2dp73/s7OTtXW1l7w8UePHtWJEyd05513yuVyyeVy6Qc/+IGee+45uVwuHT16dMI/x+v1qri4+Lw3IFMd7BhU1JQqCz2qKvJaPQ6QddzO+DksitEBICMU+dy6trFckrSTk1gAZgAhCGBztyyuUkmeW52DAb1+tMfqcQBgWjwej9asWaMdO3Yk3xeNRrVjxw6tW7fugo9fsmSJ9u3bp7179ybfPvGJT+jWW2/V3r17OXMFW0icwmqaVSLDMCyeBsg+Tkfsx2KK0QEgc3x4SaIXpNviSQDYASEIYHNel1OfWBUrSP/X3WcsngYApm/z5s3atm2bnnzySR04cED33nuv/H6/Nm7cKEm6++67tWXLFkmSz+fT8uXLz3srLS1VUVGRli9fLo/HY+WnAswIStGB6XE5KUYHgEyT6AXZdaxXI8GwxdMAyHYuqwcAkHq/vWa2/vGNk3qxuUPDgbAKvfxPH0D2uuuuu9Td3a2HHnpIHR0dWr16tV588cVkWfqpU6fkcPA6D+SOlrYBSYQgwJVyJzZBOIcFABljQVWhZpfl6czZUe062quPLK2xeiQAWYxnQoEcsGp2ieZXFehYt18v7GvX717D+RcA2W3Tpk3atGnThL+2c+fOSz72iSeemPmBAIuEI1Ed7BiSJC2bVWLxNEB2ShSjcw4LADKHYRi6dXG1/vGNk3r5UBchCIBp4WWSQA4wDEO/ffVsSZzEAgDATo71+BUIR1XgcWpueb7V4wBZabwYnXNYAJBJEiexXj7YLdMkqAZw5QhBgBzxmx+ol2FIbx7v0+m+EavHAQAAM6A5fgpraV2xHA5K0YErwSYIAGSmdfMr5XU51No/qve7hq0eB0AWIwQBcsSs0jzdsKBCkvTjd1otngYAAMyE5lZK0YHpcjvpBAGATJTncWpd/HmMlw92WTwNgGxGCALkkMRJrB/tOcMqKQAANtDSnghB6AMBrhSbIACQuW5dXC1JevkQIQiAK0cIAuSQDctqle9x6kTviPacOmv1OAAAYBpM01RzWywEaWITBLhiyU6QKJ0gAJBpEiHI2yfOanAsZPE0ALIVIQiQQwq8Lt2xvE6S9MxuTmIBAJDNWvtHNTAaksth6KqaQqvHAbKWy8E5LADIVHMq8rWgqkDhqKlfHu6xehwAWYoQBMgxv311vSTpJ++1aSwUsXgaAABwpVriWyBX1RTJ63JaPA2QvcbPYbEJAgCZ6CNLayRJOw50WjwJgGxFCALkmOvnV6iuxKehsbB2Huq2ehwAAHCFkqew6jiFBUwHxegAkNk+siR2Euvnh7oUjhBYA5g6QhAgxzgchu5cNUuS9Ny7nMQCACBbjZeiE4IA03FuMbppEoQAQKZZM7dMpflu9Y+EtOdUv9XjAMhChCBADvpEPATZcaBLQxSLAQCQlRLnsAhBgOlJFKNLUiRKCAIAmcbldCQL0jmJBeBKEIIAOWjZrGLNrypQIBzVz5r5BgIAgGzTPxJUa/+oJGkpIQgwLS7n+I/FYUIQAMhIH1kaC0FeIgQBcAUIQYAcZBiGPrkqVpD+3LttFk8DAACmKrEFMqc8X8U+t8XTANnN5RjfBCEEAYDM9MFFVXI7DR3r9ut4j9/qcQBkGUIQIEd9YnXsJNarR3rUOxyweBoAADAViT4QStGB6TsvBKFwFwAyUrHPrbXzKiRxEgvA1BGCADlqXmWBVs4uUSRq6oV97VaPAwAApqCZPhBgxjjZBAGArJA4ifWfhCAApogQBMhhiYL0f9vLSSw
AALJJc9uAJKmJEASYNsMwktsg4QghCABkqvVLayRJvzpxVgMjIYunAZBNCEGAHHbnqlkyDOntk2d15uyI1eMAAIBJGAtFdLQ7dgt72awSi6cB7MHljIcgUc5hAUCmaijP1+KaIkWipnYe7rJ6HABZhBAEyGE1xT5dH7+p+e/vchILAIBscKhjSJGoqfICj2qKvVaPA9iCyxH70ZhNEADIbOMnsQhBAEweIQiQ4xIF6f+2t9XiSQAAwGQkStGXzSqWYRiX+WgAk8EmCABkh4/ET2LtPNSlUIS/swFMDiEIkOPuWF4rt9PQwY4hHYg/qQIAADJXsg+kjj4QYKYkO0EoRgeAjLa6oVSVhR4NjYX1q+N9Vo8DIEsQggA5rjTfo9uaYq+keOK1E9YOAwAALqulLfaiBUrRgZnDOSwAyA5Oh6EPL4mdxPpZS6fF0wDIFoQgAPT5m+ZJkn78Tqu6hwIWTwMAAC4mEjV1oH1IUuwcFoCZ4WQTBACyxu1NtZKkl1o6ZZr8vQ3g8ghBAGjN3HJ9YE6pgpGo/vGNk1aPAwAALuJEr1+joYjy3E7Nqyy0ehzANtyJThDuywNAxrvpqkrle5xq7R9VcxtnvQFcHiEIAEnSF26aL0n6pzdOaiwUsXgaAAAwkcQP+kvqipKvXAcwfS5n7EfjEOewACDj+dxO3bKoSpL00+YOi6cBkA0IQQBIkjYsq9Hssjz1+YP60Z5Wq8cBAAATSPaBUIoOzKhEMXqEc1gAkBVuXxbrNiUEATAZhCAAJMVe/bbxxlg3yN+/ekxRfgAEACDjNLcNSJKWzSqxeBLAXlzxc1ihKOewACAbfHhxjVwOQ4c7h3W8x2/1OAAyHCEIgKTfvWa2irwuHev2a+fhLqvHAQAA5zBNc3wThFJ0YEY5HbEfjSOcwwKArFCS79a6BRWSpJ+xDQLgMghBACQV+dz69No5kqS//+Vxi6cBAADn6hoKqNcflMOQltQWWT0OYCvu+DmsMJsgAJA1bm/iJBaAySEEAXCez93QKKfD0OtHe7XvzIDV4wAAgLjEFsiCqkL53E6LpwHsJXEOK8xJWADIGrc11UqS3jndr67BMYunAZDJCEEAnKe+NE93rqyTJP3pv76nsVDE4okAAIB0bh8Ip7CAmeaKn8MKcw4LALJGbYlPqxpKZZrSSwc6rR4HQAYjBAFwga9+fKkqCjw60D6ov3jxkNXjAAAASS3t9IEAqZIsRo9wDgsAssmGZYmTWIQgAC6OEATABaqLfPrL/7JSkrT9tePaeYiSdAAArNYcP4e1bFaJxZMA9uOKd4JEOIcFAFllw7LYSaxdR3s0OBayeBoAmYoQBMCEPrykRr9/Q6Mk6Uv/5111DwWsHQgAgBw2NBbSyd4RSdLSOjZBgJmWOIcVIgQBgKyyoKpQC6oKFIqYevkgL+AEMDFCEAAX9ZU7lmhxTZF6hoP602felWnyQyEAAFY40D4kSaor8am8wGPxNID9OOPnsCKcwwKArJPYBvlpc4fFkwDIVIQgAC7K53bq//30B+RxOfTyoW49+h8HOREAAIAFWihFB1LKHT+HFeZ7XQDIOncsr5MkvXywW6PBiMXTAMhEhCAALmlxbZEe+o0mSdL3Xjmmz37/TU5jAQCQZslSdE5hASnhcsbPYUUIQQAg2yyvL9bssjyNhiL6xWFOYgG4kMvqAQBkvv92/VwV+Vza8qN9ev1orz7+//5Sf/uZq3XdvPIJP77PH9TB9kGdOTuq/tGgzo6E1D8S0kgwLLfTIbfTIa/LoTyPU7curta1jWUyDOOKZotETR3uHNK7p/u193S/2gbG9KFFVfqtq+tVmj/9cyGBcEQH2oc0GowoEI4oEI7K43LopoWVcjvJkQEA6ZEoRW+iFB1IifFidM5hAUC2MQxDH1tRp++9ckwv7OvQR+ObIQCQQAgCYFI+ubpey2YV695/2qP3u4b16W1vaO28cuV7nPK6nfK5nOoeDuhg+6C6prAp8t2dRzWvskD/5ZrZ+u2rZ6um2HfZx4wEw/rPA116bm+bXj/ao5FfW3d95XC3vvXiQX18RZ0+fd2cKwpZTvWO6IdvndT/efuM+vzBC379poWVemLjtclXDQIAkCrBcFTvdw5L4hwWkCqueCcImyAAkJ3uWF6r771yTDsOdGosFJHP7bR6JAAZhBAEwKQtrC7Sv226UV/90T49u7dNrx/tvejHzq3IV2NFgcoLPCrNd6s0z6MCr1PhqKlgOKpgOKq2gVG9uL9Dx3v8+osXD+nbPzuspXVFWj6rRMtmFatpVokKvE71j4TUPxLbKHnjWK9eauk8L/go9Lq0cnaJVjWUqizfrR/tadXBjiH9+J1W/fidVq1uKNWXP7pE6xZUXPLzM01TvzjcrSdeP6FfHO5Woge+LN+tikKvvK7YBktL+6BePdKjb/6kRV//5PIZ+XcLAMDFHOkaVjASVZHPpdlleVaPA9iSyxF7YQv9dwCQnVY3lGpWiU9tA2P65fs9uq2pxuqRAGQQQhAAU5Lvcemv71qt/3b9XLX2j2o0GNFYKKLRUFQleW4tqSvS4poiFXgn99fLNz8Z1vPvtetf3j6tt0+e1f7WQe1vHbzs4xrK8/SJVbP0sRV1WlJbLKdjfNPjnpvn690zA3rqrVN6dm+r9p7u16e3vaEPLqrSn25YrOX1558SiURN/cf+dn1359HkuRFJuvmqSv236+fqI0uqz9v4eHF/h/7wn3bryV0ntbCmSJ+9fu6kPlcAAK7EuX0gV3o+EsClJc5hhTiHBQBZyTAMfXR5nba/dlz/sa+dEATAeQhBAEyZYRi6prFc18zA71Xgdel3r23Q717boDNnR7TvzID2tw2ouW1QzW2DikRNlea7VZbvUWmeW/MqC/TxlXVa3VB60SeCDMPQ6oZSrW4o1ebbF+lvdhzR/37rlF453K1XDndrUU2hqoq8qiz0qizfo18c7tbxHr8kKd/j1Kevm6PPXj9XjZUFE/7+H11eq/+xYbH+8qeH9H8/16x5FQW66arKGfi3AQDAhZrbBiRJTZzCAlIm8YKXCOewMEmPP/64/vIv/1IdHR1atWqV/uZv/kbXXXfdZR/31FNP6dOf/rQ++clP6tlnn039oEAO+diKWm1/7bheOtCpQDgir4uTWABiCEEAZIzZZfmaXZavO1bMXIlZdZFP3/zUcn3h5nl67KXD+re9bTrcOazD8dvqCSV5bm28sVGfW9eosoLLF6r/0YcW6EjXsH78Tqv+6Ie79ex9N2p+VeGMzQ0AQEJLfEtxGaXoQMokNkHCnMPCJDz99NPavHmztm7dqrVr1+o73/mONmzYoEOHDqm6uvqijztx4oS+9KUv6eabb07jtEDuuHpOmaqLvOoaCuj1I726dcnF//cIILcQggDICXMrCvT//NcP6Eu3L9aJXr96hgPqGQqqZzigWaV5+u01s1U4yRNeUmzb5NHfWqGTvX7tOdWvh59r1j9+fm0KPwMAQC4yTfO8c1gAUmO8GJ1zWLi8xx57TPfcc482btwoSdq6dauef/55bd++XV/5ylcmfE
wkEtHv/d7v6etf/7p++ctfqr+/P40TA7nB4TB0x/JaPbnrpF7Y104IAiCJEARATmkoz1dDef6M/F4+t1Pf+u2Vuv2vX9Fbx/sUikTlPqc7BACA6TpzdlRDY2F5nA4trGbjEEiVxCYIxei4nGAwqN27d2vLli3J9zkcDq1fv167du266OO+8Y1vqLq6Wp///Of1y1/+8rJ/TiAQUCAQSP7z4ODlexMBSHesqNOTu07qZy2deoSf0QHE8TcBAEzDwqpClea7FQhHk+dKAACYKc3xry1X1RTK4+JbdyBVEp0gITpBcBk9PT2KRCKqqTm/dLmmpkYdHR0TPubVV1/V97//fW3btm3Sf86jjz6qkpKS5FtDQ8O05gZyxbWN5aos9GhgNKRdR3utHgdAhuAnKQCYBofD0NVzyiRJu0+etXgaAIDdtCRK0TmFBaTU+CYI57Aws4aGhvTZz35W27ZtU2Vl5aQft2XLFg0MDCTfTp8+ncIpAftwOgxtWFYrSXphX7vF0wDIFIQgADBNV88plSTtOUUIAgCYWYk+kGWzCEGAVEqEICHOYeEyKisr5XQ61dnZed77Ozs7VVtbe8HHHz16VCdOnNCdd94pl8sll8ulH/zgB3ruuefkcrl09OjRCf8cr9er4uLi894ATM7HVtRJkn7W0qkwXU8ARAgCANN29dzYJsgeNkEAADMscQ6raVaJxZMA9pY4h8WTZbgcj8ejNWvWaMeOHcn3RaNR7dixQ+vWrbvg45csWaJ9+/Zp7969ybdPfOITuvXWW7V3717OXAEpsHZeucoLPOrzB/XGsT6rxwGQAShGB4BpWjW7VE6HobaBMbX1j2pWaZ7VIwEAbKDPH1T7wJgkaWldkcXTAPZGMTqmYvPmzfrc5z6na665Rtddd52+853vyO/3a+PGjZKku+++W/X19Xr00Ufl8/m0fPny8x5fWloqSRe8H8DMcDkd2rCsVv/7rVN6fl+bbrpq8qfoANgTmyAAME0FXlfyySlOYgEAZsqB+CmsuRX5KvK5LZ4GsDeK0TEVd911l/7qr/5KDz30kFavXq29e/fqxRdfTJalnzp1Su3tdBEAVvp4/CTWT5s5iQXgCkOQxx9/XI2NjfL5fFq7dq3eeuutST3uqaeekmEY+tSnPnUlfywAZKw1cxInsfqtHQQAYBvNlKIDacMmCKZq06ZNOnnypAKBgN58802tXbs2+Ws7d+7UE088cdHHPvHEE3r22WdTPySQw66fz0ksAOOmHII8/fTT2rx5sx5++GHt2bNHq1at0oYNG9TV1XXJx504cUJf+tKXdPPNN1/xsACQqRK9ILvZBAEAzJCWNkrRgXRxOePF6LxaGABsIXESS5Ke39dm8TQArDblEOSxxx7TPffco40bN6qpqUlbt25Vfn6+tm/fftHHRCIR/d7v/Z6+/vWva/78+dMaGAAy0dXxTZDm1gGNhSIWTwMAsIPmZAhCKTqQai5HvBidTRAAsA1OYgFImFIIEgwGtXv3bq1fv378N3A4tH79eu3ateuij/vGN76h6upqff7zn5/UnxMIBDQ4OHjeGwBkstlleaou8iocNfXemQGrxwEAZLmxUERHu4clSU1sggAplziHRQgCAPbBSSwACVMKQXp6ehSJRJJlXwk1NTXq6OiY8DGvvvqqvv/972vbtm2T/nMeffRRlZSUJN8aGhqmMiYApJ1hGFoTP4lFOToAYLoOdgwpakqVhR5VF3mtHgewvcQ5LF4pDAD2wUksAAlXVIw+WUNDQ/rsZz+rbdu2qbKyctKP27JliwYGBpJvp0+fTuGUADAzEiexdp8kBAEATE+iD2RpXbEMw7B4GsD+EuewKEYHAHvhJBYASXJN5YMrKyvldDrV2dl53vs7OztVW1t7wccfPXpUJ06c0J133pl8XzQa+wvH5XLp0KFDWrBgwQWP83q98np5xRuA7JIoR99z8qxM0+RJKwDAFWtui51WpA8ESA+K0QHAnn79JNZNV03+RdoA7GNKmyAej0dr1qzRjh07ku+LRqPasWOH1q1bd8HHL1myRPv27dPevXuTb5/4xCd06623au/evZy5AmAry+uL5XE61OsP6mTviNXjAACyWEt7bBOEPhAgPdzxEIRNEACwF05iAZCu4BzW5s2btW3bNj355JM6cOCA7r33Xvn9fm3cuFGSdPfdd2vLli2SJJ/Pp+XLl5/3VlpaqqKiIi1fvlwej2dmPxsAsJDX5dSK2bFX7NILAgC4UpGoqYPtQ5KkZYQgQFo44+ewQhFCEACwm8RJrBf3d7DxB+SoKZ3DkqS77rpL3d3deuihh9TR0aHVq1frxRdfTJalnzp1Sg5HSqtGACBjXT2nVLtPntXuk2f1W1fPtnocAEAWOt7j12goojy3U40VBVaPA+QElyNejB7lyTEAsJvr55erstCjnuGgXj3So1sXV1s9EoA0m3IIIkmbNm3Spk2bJvy1nTt3XvKxTzzxxJX8kQCQFdbMLdO2Xx6nHB0AcMUSfSBL64rkdNAvBaSDi3NYAGBbLqdDH19Rpyd3ndRze9sIQYAcxMoGAMygVQ2lkqTDnUOs2QIArkiiD4RSdCB9XJzDAgBb+8TqeknST5s7NBqMWDwNgHQjBAGAGVRb7JPP7VDUlFrPjlo9DgAgC7W0UYoOpFviHBabIABgT1fPKdXssjyNBCP6zwOdVo8DIM0IQQBgBhmGoTnl+ZKkU30jFk8DAMg2pmmquS2xCUIIAqRL4hwWm7wAYE+GYeiTq2dJkp57t83iaQCkGyEIAMywOeWxEtuThCAAgCnqHAyozx+U02FoUU2R1eMAOcPtjP1oHGYTBABs65Pxk1g7D3VpYCRk8TQA0okQBABmWHITpNdv8SQAgGyTKEVfWFUon9tp8TRA7nCecw7LNAlCAMCOFtUUaUltkUIRU/+xv93qcQCkESEIAMywuRWcwwIAXBn6QABruB3jPxqzDQIA9pXYBvm3vZzEAnIJIQgAzLDEJsjJXkIQAMDU0AcCWMMZ7wSRKEcHADu7c1WdJOmN473qGBizeBoA6UIIAgAzbM45myCcUwAATEVLe3wTpI4QBEgnl2M8BKEcHQDsa3ZZvq5tLJNpSj95j20QIFcQggDADJtdlifDkEaCEfX6g1aPAwDIEoNjoeQpRc5hAemVKEaX2AQBALv7BCexgJxDCAIAM8zrcqqu2CeJk1gAgMk7ED+FVV+ap9J8j8XTALnlnEUQhSKEIABgZx9fUSeXw9C+1gEd7R62ehwAaUAIAgApMH4Sy2/xJACAbNFMKTpgGcMw5I73goSjnMMCADsrL/Dog4uqJEnPvtNq8TQA0oEQBABSYG55gSTpVO+oxZMAALIFfSCAtZzxdZAwmyAAYHuf+kDsJNaP32mlyxPIAYQgAJACiU2Qk2yCAAAmKbEJsoxNEMASbkfsx+MwnSAAYHu3La1RodelM2dHtfvkWavHAZBihCAAkAJzyuPnsOgEAQBMQiAc0fudQ5KkZfUlFk8D5CZn/BxWhHNYAGB7eR6nPrq8VpL0I05iAbZHCAIAKTA32
QlCCAIAuLz3O4cVjpoqzXdrVonP6nGAnOSKb4JQjA4AueE34yexnn+vXYFwxOJpAKQSIQgApEBiE6RrKKDRIN9MAQAurbltQFLsFJZhGBZPA+SmZDE6IQgA5ITr51eottingdGQXj7YbfU4AFKIEAQAUqA036Nin0sS2yAAgMsb7wPhFBZglWQxOuewACAnOB2GPrl6liTpWU5iAbZGCAIAKTKHk1gAgEmiFB2wnttJMToA5JpPxU9i/fxglwZGQhZPAyBVCEEAIEXmlhdIkk72+i2eBACQySJRUwfaCUEAqyU3QTiHBQA5Y2ldsZbUFikYier5fe1WjwMgRQhBACBFEpsgp9kEAQBcwolev0aCEeW5nZpXWWj1OEDOcnEOCwByUqIgnZNYgH0RggBAiiTK0U8SggAALiFxCmtJXVHylegA0i95DotNEADIKZ9YPUuGIb11oo8XMQI2RQgCACkyNx6CnOrlmygAwMU1tw1I4hQWYLXxYnRCEADIJXUlebpxQaUk6Z/fOmXxNABSgRAEAFIkcQ7rzNlRRfhhGgBwES3JUvQSiycBcpvbmegE4RwWAOSau9fNlST98I2T8gfCFk8DYKYRggBAitSV5MntNBSMRNUxOGb1OACADGSaZvIcFpsggLXYBAGA3PWRpTVqrMjX4FhY/7rnjNXjAJhhhCAAkCJOh6HZZfFekF6/xdMAADJRx+CY+vxBOR2GFtUUWT0OkNOSnSAUowNAznE6DP33m+ZJkra/epxrDoDNEIIAQAolytEpVwMATKS5NbYFsrCqUD630+JpgNzmSmyCUIwOADnpd9bMVkmeWyd6R7TjQKfV4wCYQYQgAJBCiRDkJOXoAIAJcAoLyBxOR2IThBAEAHJRvselz6ydI0n6+1ePWzwNgJlECAIAKTQ3Xo5+kk0QAMAEmtsGJElNhCCA5ShGBwB8bl2jXA5Dbx3v03tn+q0eB8AMIQQBgBRq4BwWAOASxjdBSiyeBADF6ACA2hKf7lw1S5L0fbZBANsgBAGAFEpugnAOCwDwa/pHgmrtH5XEJgiQCZLF6HSCAEBO+3y8IP3599rVPjBq8TQAZgIhCACkUKITZGA0pIGRkMXTAAAySUt8C6ShPE8leW6LpwHAJggAQJKW15do7bxyhaOm/vbnR6weB8AMIAQBgBTK97hUVeSVJJ3o9Vs8DQAgkyRPYdVxCgvIBHSCAAAS/nj9IknSD988pV+d6LN4GgDTRQgCACk2r7JAknS8hxAEADAuUYq+jFNYQEZwOWI/HofYBAGAnLduQYXuuqZBkvSVf31PY6GIxRMBmA5CEABIsQVVsRDkWPewxZMAADJJchOknhAEyASJc1iRKJsgAADpqx9bqqoir452+/X4y5zFArIZIQgApFhiE+QYmyAAgLiRYFhH4+H48lmcwwIywfg5LDZBAABSSb5b3/jEMknSd3ce1cGOQYsnAnClCEEAIMXmVxZK4hwWAGDcgfYhRU2pqsir6mKf1eMAkOSMn8OiGB0AkPDR5bW6valG4aipL//rPkX4GgFkJUIQAEixeVXjnSCmyTdMAIDxPpDl9IEAGYNidADArzMMQ9/81HIV+Vx693S/vvbjfTpzdsTqsQBMkcvqAQDA7hrK8uV0GBoJRtQ5GFBtCa/4BYBc19wa7wPhFBaQMVxsggAAJlBT7NMDH1+qL//rPj31q9P6l7dP6yNLa/S5dY26dl6ZTFOKRE1FTVOmJEOSwzBkGJLb6ZDbyWvQM0kgHNG7pwf05rFevXWiT2dHgopGpWj8RauJ164asddGyOU0VOh1qdDrVpHPpdJ8t25YUKmbFlYqz+O06LPAVBGCAECKeVwOzSnP1/Eev451DxOCAAC0P7EJQik6kDFcdIIAAC7irmvnqLLQq3947YRePdKjl1o69VJL52UfZxhSXbFPDeX5mlOer7rSPDkNIxaYxJ9tL/S5VJrvUWmeW6X5HvmDYXUPBdQ9FFDX4JhCUVMFHqcKvC4VeFyx/+t1Jv+zz+1QKGIqEI4oEIoqFInq3K9kpin5g2ENjoY0EH/zByIaC8XeRkMRjQQjGg1G5A+GNRqMKBw1VZbvVlmBR2X5HhX7XApFTQVCEY2FogqEIzIMQy6HIZfTIZfDkNNhyO005HLE/tnhMGTE/x0YMpTncWpBVYEWVBdqYXWhqgq9GglG1BX/PHuGgwpHozJNyZQp05QKvC6Vx2coL/CoJM8tp8OY8N91NGpqJBRRIBRRIBxVIByVPxDW6b4Rnegd0clev451+/XumX4FwtPb+vyH107I53bog1dV6bamGi2bVaL60jwV57lkGBPPB2sRggBAGsyrLIiFID1+3bCw0upxAAAWCoQjOtw5JIlNECCTuOJPqoSinMMCAFzoI0tr9JGlNTrSNaR/3HVSz+w+I38wcsnHmKbUNjCmtoExvXm8L02TzoyB0ZBO9Kbu9JfH6VDwCk5QFnldKs5zqyTPLbfT0MBoSP2jIQ2OhjTZZc7KQo/WzqvQ2vnlaijLl8NhyGHEN3gkmVIyjAlHTA0HwhoOhDU0FtKZs6PacaBLrf2j+llLp352ThiW73FqVmmein0ueVyxLSCvy6Hr5pXrDz64YMqfK2YOIQgApMH8ygL9XNKxbsrRASDXvd85rFDEVEmeW7PL8qweB0Bc4pWllN4CAC5lYXWRvv7J5XrgN5o0GorIYRhyGobiVxWT55SiZuzJ8zNnR3W6b0SnekfUMTgmKbYd4TAMmaY0NBZ7Ev/sSEgDI0HleVyqLvKqqsir6iKvPC6HRoIRDQfCGgmENRyIaCQYlj8Qlj8Y2+bwuBzyupzyuBzyOI3YNkJ8DlOmCrwulcSDg5I8twq8LuW5nfK5HfK5ncpzxzZN8jxO5XucchqG+kdD6vMHddYf1OBYSG5n7GN97tifJUmhSFSRaCwoCEdNhaNRhSKmwpGoIqapc2tRB0dDOtI1rCPdwzrVN5IMQPI9TtUU+1RZ6JHH5ZAhI3mKajgQ1ll/UH3+oAbHwpKkoUBYQ4GwWvtHL/r/I088fPC6nZpdlqfGinzNrShQY2W+VtSXakFVwbQ2Nr7+CVMt7YP6WXOnfnG4W6f7RtTrD2okGNGRruELPv4/D3TpjuV1aijPv+I/E9NDCAIAaTBejn7hF0MAU/f444/rL//yL9XR0aFVq1bpb/7mb3TddddN+LHbtm3TD37wA+3fv1+StGbNGj3yyCMX/Xgg1RKl6MtmFbMuD2SQxM12zmEBACZjMn0f+R6Xqot8unpOWZqmyg5joYi6hwIqK/Co0Du5p6dDkeh5J70GRkMKR0yV5MeCndI8t4p8bnldDjkucjJrphiGoWWzSrRsVom+eNsiSbHPqa1/VG39Y/IHwwqGY6fJvrvzqN7vGtabx/sIQSxECAIAaTCvMhGCsAkCTNfTTz+tzZs3a+vWrVq7dq2+853vaMOGDTp06JCqq6sv+PidO3fq05/+tG644Qb5fD79z//5P3X77berublZ9fX1FnwGyHX746Xoy+s5hQVkksQmSJhzWAAApJTP7ZxyIOB2OlRR
6FVFoTdFU02Pz+3U/KpCza8qPO/9hzuHYyHIsV79zprZFk2HS8eVAIAZsSD+RfD02VEFp1nABeS6xx57TPfcc482btyopqYmbd26Vfn5+dq+ffuEH//DH/5Qf/RHf6TVq1dryZIl+vu//3tFo1Ht2LEjzZMDMfvP2QQBkDncFKMDAIAZtnZ+uSRlXSeM3RCCAEAaVBd5VeBxKhI1daovdcVigN0Fg0Ht3r1b69evT77P4XBo/fr12rVr16R+j5GREYVCIZWXl6dqTOCiIlFTB9rZBAEykSt+zD1EJwgAAJgh18wtk8OQTvWNqH3g4j0mSC1CEABIA8Mwkr0gx7rpBQGuVE9PjyKRiGpqas57f01NjTo6Oib1e3z5y1/WrFmzzgtSfl0gENDg4OB5b8BMONY9rLFQVPkep+ZVFFg9DoBzuJyJYnS2dgEAwMwo8rmTL3568xjbIFYhBAGANJlXGTuJRS8IYJ1vfetbeuqpp/TjH/9YPp/voh/36KOPqqSkJPnW0NCQxilhZ4lTWE11xSkvbAQwNclNEM5hAQCAGbR2HiexrEYIAgBpMp9ydGDaKisr5XQ61dnZed77Ozs7VVtbe8nH/tVf/ZW+9a1v6Wc/+5lWrlx5yY/dsmWLBgYGkm+nT5+e9uyARCk6kMkSxegRzmEBAIAZdN28CknSm8d7LZ4kdxGCAECazE+ewyIEAa6Ux+PRmjVrzis1T5Scr1u37qKP+4u/+At985vf1Isvvqhrrrnmsn+O1+tVcXHxeW/4/9q78yip6zPf459auqq66Z2GXqABQRHZIwREw3VMOnKjY+LMeOSaGWQ4iVmUnIx9bkZxoU00gl7G4x1DdIJh4tyjwUlGHScSshCZRO0JE5aIgkZlabZuuoHel9q+94/qKmwFpeG31PJ+ndMHu/h19dNfgXqqnnqeB1Z44zBL0YF0dWoxOuOwAACAdeZNKJfHk3g96FhXv9vh5CSKIADgkAsGO0H20gkCnJf6+nqtW7dOTz31lPbs2aOvf/3r6unp0bJlyyRJN998s1asWJG6/qGHHtK9996r9evXa8KECWpublZzc7O6u9nPA2fF40a7jyQ6QabV0AkCpBu/j3FYAADAeiUFeZpSlXgT1FZGYrmCIggAOCRZBGnrHlBnf8TlaIDMtXjxYq1Zs0YrV67U7NmztXPnTm3atCm1LL2pqUlHjx5NXf/4448rHA7rhhtuUHV1depjzZo1bv0IyFEHT/aqayCqgM+riyoL3Q4HwAf4GYcFAABsktoLwnJ0V/jdDgAAckVRKE+jioJq7RrQvtYezaotdTskIGMtX75cy5cvP+3vbdmyZcjn+/fvtz8g4Cwk94FMqS5Sno/3IgHpJlkEicQZhwUAAKx12cRy/ei1/ewFcQnPvgDAQRNTI7EYwwMAueaNI8l9IIzCAtKR30cnCAAAsMcnJyQ6Qf7U0q0TPWGXo8k9FEEAwEHJ5ej7WI4OADmHpehAevN7E0+Po+wEAQAAFhtZGNRFoxMjcdkL4jyKIADgIJajA0BuMubUUvTpY+gEAdKRb3AcVpRxWAAAwAbzJw7uBWEkluMoggCAgyZWJKr+e+kEAYCccqSjX8d7wvJ5PZpSVeR2OABOI7mrh04QAABgh/kXjJTEcnQ3UAQBAAddkByH1dYjY3iCDQC5YtehxCisi0YXKpTnczkaAKeT3AkSidEJAgAArJfsBNnT3KmO3ojL0eQWiiAA4KBx5QXyeT3qi8TU3NnvdjgAAIck94HMYBQWkLb8XhajAwAA+4wuCmn8yAIZI715tMPtcHIKRRAAcFCez6tx5QWSGIkFALlkV7IIMpYiCJCu/IPjsCIUQQAAgE2qS0KSpNauAZcjyS0UQQDAYVNriiVJv32n1eVIAABOMMakOkFYig6kLzpBAACA3UYVUQRxA0UQAHDYn8+oliT9x84jivMkGwCy3tH3LUWfWl3sdjgAzuD9RRB2twEAADtUFAYkSW3dYZcjyS0UQQDAYVdNGa3CoF9HOvq1remk2+EAAGyWHIXFUnQgvSXHYUlSJEYRBAAAWK+iMChJauumE8RJFEEAwGGhPJ8WTauSJP37zsMuRwMAsBujsIDMkOwEkRiJBQAA7DGKIogrKIIAgAs+P7tGkrRxV7MisbjL0QAA7JRaik4RBEhrft+pIkgkTn4GAACsN6ooUQRhJ4izKIIAgAuumDRSI0cEdKInrFffbXM7HACATViKDmQOv/fU0+MY47AAAIANGIflDoogAOACv8+ra2cmFqS/uPOIy9EAAOzS3Nmvtu6wvB6xFB1Icz6vR57BZhA6QQAAgB0qihKL0Y93hxVn/KZjKIIAgEu+MDgS6xdvNqs/EnM5GgCAHXYdSi5FL1J+gKXoQLrLG+wGYScIAACww8gRiU6QaNyooy/icjS5gyIIALjk0nFlGlOar55wTL9565jb4QAAbPDGkU5JjMICMoVvcDl6lHFYAADABgG/V6UFeZKkVkZiOYYiCAC4xOPx6LpZiW6Qf9952OVoAAB2eCO1FJ1RWEAmSC5Hj8QYhwUAAOyR2gvCcnTHUAQBABclR2K9/HYrbZAAkIV2JYsgY+kEATKBf7AThHFYAADALhWFib0gdII4hyIIALhoSlWRLhpdqHA0rh++sk/G8IQbALJFS2e/WrsGBpeiUwQBMoHfl3iKHGEcFgAAsMmoopAkqZVOEMf43Q4AAHKZx+PR31w2Xg0vvql/3PyOjrT36YHrpyuUZ8/yXGMSi7eaO/vV0jmguDEK+r2DHz75fR75vR55PR75vV75fR4F/V6F8nwK+r2pFwYAAB8vuRT9wtGFLEUHMgSdIAAAwG7JTpC27rDLkeQOiiAA4LKbF4xXJBbXgxv36KfbDumdli49sWSOqkvyz/o+YnGjox19OtLeryPtfTrc3qfmjn6190XU0RdRR29YJ3sjOtbVr/7Iuc+4DuV5dXFVsabVFGt6TYkuqS7SyBFBFQR9GhHwK5TnlcfjOef7B4BskhyFxVJ0IHOkdoLE2QkCAADskdoJwjgsx1AEAQCXeTwefXnhRE2pKtbyH2/XHw916LrHXtUtCy/Q5KoiXVxZpOqSkDwej2Jxo5O9YZ3oCeu9Y93aeahdfzzYrjcOd6p7IHrW37OsIE+VxSH5fR6Fo3ENROMaiMQVjccVixtF40bxuFEkZhR+32LQ/khcfzyY+J6n/1mkoN+rgM+rYJ5PAZ9X18yo0t3XTj3fYwKAjHNqKTpFECBT5HkTXa9RxmEBAACbjKII4jiKIACQJj51UYX+Y/mndMu//EFvNXdp1c/fSv1eUdAvv8+j9r6IzrQ2JODzqqY0pOqSfNWU5qu6JKSyEQGV5uepJD9PpQV5Gl0U0uji4LDGbcXjiUJIfySm4z1h7T7SqTePdOrNIx36U0uXuvqj6g3HJEnGJAol/ZG41J8oyqz73T4tuWyCxo0sOPfDAYAMtIsiCJBxfIPjsKJ0ggAAAJuMKkoUQdgJ4hyKIACQRmrLC/TcrZfr/zUe0OuHO/ROS5f2tvao6wNdHqUFeRpblq+ZY0s
1e2ypZtaW6MJRhbbs7PB6PQp5fQrl+VRaENCkUYW6blbNkGvicaO+SEw94agGInGFY4nOkm//x5v6/b4Tem7HIf1d3WTLYwOAdHWss1/HkkvRa4rdDgfAWUrmUnSCAAAAuzAOy3kUQQAgzRQE/PrqlZNSn4ejce0/3iNJKh/s7Ei3BeVer0cjgn6NCA59WLlp3rhEEWT7YX3zMxexLwRAzvjj+5aiFwRIuYFMwWJ0AABgt4qixGL0491hxeNGXi+vldgtvV5FAwB8SMDv1eTKIk2uLFJFYTDtCiAf5epplRoR8KnpRK/+cOCk2+EAgGNeP9QuSZo5ttTVOAAMT2oxeoxxWAAAwB4jRyQ6QaJxo46+iMvR5IbMeSUNAJBxCgJ+fW5GtSTpue2HXI4GAJyz82C7JGlWbamrcQAYntRidDpBAACATQJ+r0oL8iRJrYzEcgRFEACArf7q0rGSpJ+9flT9kZjL0QCA/YwxqaXos8ayFB3IJKcWo1MEAQAA9kntBWE5uiMoggAAbDX/gnKNKc1XV39Uv9rd4nY4AGC7phO9au+NKODzakoVS9GBTJIchxVlHBYAALBRRWFiLwidIM6gCAIAsJXX69FffGKMJEZiAcgNyaXol1QXKeAn3QYyiZ9OEAAA4IBUJ0h32OVIcgPPygAAtvvLSxNFkN++06ZjXf0uRwMA9np9cB8IS9GBzOP3De4EiVEEAQAA9hlVlCiCtDIOyxEUQQAAtps4qlCfGFeqWNzoxZ1H3A4HAGz1+mAnCEvRgcyT7ASJxRmHBQAA7HOqE4QiiBMoggAAHPGXgwvS/237YZcjAQD7RGNxlqIDGSzZCRKhEwQAANhoFEUQR1EEAQA44rqZ1crzebTnaKcOnuh1OxwAsMW7rd3qi8Q0IuDTxFGFbocDYJjyUjtB6AQBAAD2qSgaXIzOOCxHUAQBADiitCCg8SNHSJIOHKcIAiA7vX4w0QUyfUyJfIMvpgLIHD4WowMAAAeMKgxJohPEKRRBAACOGVdeIElqohMEQJb646F2SewDATIVi9EBAIATkp0gx7vDivPmC9tRBAEAOCZZBDlwosflSADAHqml6GNL3Q0EwDnx0wkCAAAcMHJEYidING7U0RdxOZrsRxEEAOCY2sEiCDtBAGSjgWhMbzV3SpJmshQdyEh+32ARJMZOEAAAYJ+A36vSgjxJUisjsWxHEQQA4JjxjMMCkMX2HO1SJGZUPiKgsWX5bocD4BzkJcdh0QkCAABsVlGY6AZpYzm67SiCAAAcM27kYBGExegAstAfD7ZLSnSBeDwsRQcyUXIcVjhKJwgAALBXRWFiLwidIPajCAIAcExtWaII0tkfVUcvMy8BZJfkUvSZ7AMBMlZhyC9J6h6IuhwJAADIdqlOkO6wy5FkP4ogAADH5Ad8GlWUeJBnOTqAbJNcij67ln0gQKYqyU/M5mZBKQAAsFvy9ZFWxmHZjiIIAMBR49gLAiALdQ9E9V5rtyQ6QYBMVpqfGEtBxyoAALDbqU4QiiB2owgCAHAUy9EBZKNdhzpkjDSmND/1ZAZA5qETBGdr7dq1mjBhgkKhkObPn6+tW7ee8dp169Zp4cKFKisrU1lZmerq6j7yegBAbhhFEcQxFEEAAI6qHSyCHKQIAiCL7Dh4UpI0i1FYQEYrLUgUQdr7mM2NM3v22WdVX1+vhoYGbd++XbNmzdKiRYt07Nix016/ZcsW3XTTTXr55ZfV2Nio2tpaXX311Tp8+LDDkQMA0klFUaIDlSKI/SiCAAAclRyHdeA4RRAA2WP7gUQR5NJxZS5HAuB80AmCs/HII4/olltu0bJlyzR16lQ98cQTKigo0Pr16097/dNPP61bb71Vs2fP1pQpU/Tkk08qHo9r8+bNDkcOAEgnowpDktgJ4gSKIAAAR40byTgsANnFGKPtTe2SpEvHUwQBMlnJYCdIfySu/kjM5WiQjsLhsLZt26a6urrUbV6vV3V1dWpsbDyr++jt7VUkElF5ebldYQIAMkCyE+R4d1jxuHE5muxGEQQA4KjkTpAj7X2KxOIuRwMA5+/A8V6d6Akr4PNqWk2x2+EAOA+FAb+8nsR/d9INgtNoa2tTLBZTZWXlkNsrKyvV3Nx8Vvdxxx13qKamZkgh5YMGBgbU2dk55AMAkF1GjkjsBInGDV2oNqMIAgBw1KiioIJ+r+ImUQgBgEy3vSkxCmvG2BIF/T6XowFwPrxeT2okVjsvRsAGq1ev1oYNG/T8888rFAqd8bpVq1appKQk9VFbW+tglAAAJwT83lTe0cpeEFtRBAEAOMrj8bAXBEBW2ZbaB1LqbiAALMFeEHyUiooK+Xw+tbS0DLm9paVFVVVVH/m1a9as0erVq/XLX/5SM2fO/MhrV6xYoY6OjtTHwYMHzzt2AED6GVWU6AZpYy+IrSiCAAAclyyCsBcEQDZI7QNhKTqQFUoKEvO523spguDDAoGA5syZM2SpeXLJ+YIFC874dQ8//LDuv/9+bdq0SXPnzv3Y7xMMBlVcXDzkAwCQfSoKE3kHnSD28rsdAAAg9ySXox+kCAIgw3UPRPV2c2JOO0vRgexAJwg+Tn19vZYuXaq5c+dq3rx5evTRR9XT06Nly5ZJkm6++WaNGTNGq1atkiQ99NBDWrlypZ555hlNmDAhtTuksLBQhYWFrv0cAAD3VRQmOkFa6QSxFUUQAIDj6AQBkC1eP9iuuJHGlOarsvjMs90BZI7S5E6Q3rDLkSBdLV68WK2trVq5cqWam5s1e/Zsbdq0KbUsvampSV7vqcEbjz/+uMLhsG644YYh99PQ0KD77rvPydABAGkmNQ6rm7zDThRBAACOowgCIFuk9oHQBQJkjWQnSCedIPgIy5cv1/Lly0/7e1u2bBny+f79++0PCACQkZJFEDpB7MVOEACA41JFkOO9Msa4HA0AnLvtTSxFB7JNacFgJwhFEAAAYLNRyXFY7ASxFUUQAIDjageLIF0DUZaOAshYxhjtONguiaXoQDZhJwgAAHBKahwWnSC2oggCAHBcKM+nyuLEAz0jsQBkqr1tPWrvjSiU59XUmmK3wwFgkZLUThCKIAAAwF6pcVh0gtjqnIoga9eu1YQJExQKhTR//nxt3br1jNeuW7dOCxcuVFlZmcrKylRXV/eR1wMAcgN7QQBkuuQ+kJljSpXn471FQLagEwQAADglWQQ53j2gWJxx4XYZ9rO1Z599VvX19WpoaND27ds1a9YsLVq0SMeOHTvt9Vu2bNFNN92kl19+WY2NjaqtrdXVV1+tw4cPn3fwAIDMVUsRBECG2zG4D+QT40vdDQSApUoLApIoggAAAPuVFwTk8UhxI53oCbsdTtYadhHkkUce0S233KJly5Zp6tSpeuKJJ1RQUKD169ef9vqnn35at956q2bPnq0pU6boySefVDwe1+bNm887eABA5nr/cnQAyETbD7RLYh8IkG3oBAEAAE7x+7waOSLxBoxW9oLYZlhFkHA4rG3btqmuru7UHXi9qqurU2Nj41ndR29vryKRiMrLy894zcDAgDo7O4d8AACyy/
iRdIIAyFyd/RH96ViXJIogQLYpLThVBDGGsRQAAMBeFYXsBbHbsIogbW1tisViqqysHHJ7ZWWlmpubz+o+7rjjDtXU1AwppHzQqlWrVFJSkvqora0dTpgAgAzAThAAmWxnU7uMSfxblpzjCyA7JDtBYnGj7oGoy9EAAIBsl3w+0UYniG0c3eC4evVqbdiwQc8//7xCodAZr1uxYoU6OjpSHwcPHnQwSgCAE5I7QY529CkcjbscDQAMT3Ip+qXjSt0NBIDlQnk+Bf2Jp8rtvYzEAgAA9koWQegEsY9/OBdXVFTI5/OppaVlyO0tLS2qqqr6yK9ds2aNVq9erV//+teaOXPmR14bDAYVDPKOOgDIZqMKg8rP86kvEtPh9j5dUDHC7ZAA4Kw17j0uSZp3wUiXIwFgh5L8PB3rGlBHX0TMJQAAAHZKFUHoBLHNsDpBAoGA5syZM2SpeXLJ+YIFC874dQ8//LDuv/9+bdq0SXPnzj33aAEAWcPj8aRGYh043uNyNABw9vrCMe1sapckXT6JIgiQjd6/FwQAAMBOowopgtht2OOw6uvrtW7dOj311FPas2ePvv71r6unp0fLli2TJN18881asWJF6vqHHnpI9957r9avX68JEyaoublZzc3N6u7utu6nAABkpNryfEnSoZN9LkcCAGdv24GTCsfiqikJafzIArfDAWCD5F4QiiAAAMBudILYb1jjsCRp8eLFam1t1cqVK9Xc3KzZs2dr06ZNqWXpTU1N8npP1VYef/xxhcNh3XDDDUPup6GhQffdd9/5RQ8AyGhjyxIvHh48yXJ0AJnjtffaJEmXTRopj8fjcjQA7FCSH5DEThAAAGC/VCcIO0FsM+wiiCQtX75cy5cvP+3vbdmyZcjn+/fvP5dvAQDIAWPL6AQBkHleey+xD+TySRUuRwLALnSCAAAAp9AJYr9hj8MCAMAqyU6QQyfoBAGQGbr6I9p1uEOStIB9IEDWSu4Eae8LuxwJAADIdskiSEdfRAPRmMvRZCeKIAAA17ATBECm+e/9JxSLG00YWaAxpfluhwPAJslOkE46QQAAgM1K8vOU50uM2T3ezRsw7EARBADgmmQnyPGesHoGoi5HAwAf77V3E6Ow6AIBsluqE4SdIAAAwGYej+fUXhBGYtmCIggAwDUl+XkqDiXWUx1upxsEQPpL7gNZwD4QIKuxEwQAADipgr0gtqIIAgBwVW15ohvkIHtBAKS5kz1h7WnulCQtmEgnCJDNkkUQOkEAAIATUp0g3RRB7EARBADgqrFl7AUBkBl+v++4jJEuGl2YWl4IIDvRCQIAAJw0ik4QW1EEAQC4qraMThAAmSE5Cuty9oEAWa+0ICCJIggAAHBGsgjSRieILSiCAABcRScIgEzBPhAgdyQ7QboHoorE4i5HAwAAsh2dIPaiCAIAcFVqJ8hJOkEApK9jnf1691i3PB7psonlbocDwGbFIX/qvzvpBgEAADZL7QShCGILiiAAAFeNHRyHRScIgHTWuDfRBTK1ujg1JgdA9vL7vCoKJgohjMQCAAB2S3WCMA7LFhRBAACuSo7D6uiLqLOfFxkApKfX3mUfCJBrSgoSI7HaKYIAAACbVdAJYiuKIAAAV40I+lU+IvGu6kMn6AYBkH5icaPNb7VIkhZeNMrlaAA4JbkXhE4QAABgt2QnSG84pp6BqMvRZB+KIAAA19UOdoOwFwRAOvr9vuNq6w6rtCBPC+gEAXJG6WAnSEcvRRAAAGCvEUG/CgI+SVIbI7EsRxEEAOA69oIASGc/39UsSbp6aqXyfKTPQK6gEwQAADgptReEkViW41kcAMB1Y8sHO0FO0AkCIL3E4kY/fyNRBPncjGqXowHgpJL8xLjOdjpBAACAA0axF8Q2FEEAAK6rpRMEQJr67/0n1NY9oOKQX1dMqnA7HAAOohMEAAA4KdUJwjgsy1EEAQC4buzgTpBD7AQBkGZ+vuuoJOmzU6sU8JM6A7kkuROkvS/sciQAACAXMA7LPjyTAwC4rrb8VCeIMcblaAAgIf6+UVjXzqxyORoATkt2gnTSCQIAABxQwTgs21AEAQC4bkxpohOkeyDK3G0AaWNb00kd6xpQUcivKy5kFBaQa0oHiyDkJgAAwAnJTpA2xmFZjiIIAMB1oTyfRg8+2LMXBEC6eOn1wVFYl1Qq6Pe5HA0Ap7ETBAAAOInF6PahCAIASAvJvSAH2QsCIA0kRmEliiDXzKh2ORoAbihJ7QShCAIAAOzHThD7UAQBAKSFU3tBKIIAcN+OgyfV0jmgwqBfCyczCgvIRe/vBGFnGQAAsFuqCNI9QO5hMYogAIC0kOoEOcE4LADue+n1xEL0uktGMwoLyFGlBQFJUjgaV38k7nI0AAAg240sTOQekZhhHKfFKIIAANJCbVmiE4RxWADc1heO6cU/HpEkfY5RWEDOGhHwyef1SGIvCAAAsF/Q71Pp4DhORmJZiyIIACAtjC1LjsOiEwSAu/6lcb/augc0tixfV1082u1wALjE4/GoND+5FyTscjQAACAXVLAc3RYUQQAAaaG2PDEO69DJXmZfAnBNZ39Ej//ne5Kkb37mIgX8pMtALkvtBemlEwQAANhvVOGpvSCwDs/qAABpobokXx6P1B+Jq62bd1sCcMcPf7dP7b0RTRo1Qn/xiTFuhwPAZSUFyU4QiiAAAMB+qeXodIJYiiIIACAtBPxeVReHJLEXBIA7TvSE9cNX9kmS6j97sfw+UmUg16U6QSiCAAAAB6SKIHSCWIpndgCAtDG2nL0gANzzxH++p+6BqKbVFOtz06vcDgdAGihlHBYAAHBQdUnizaHb9p90OZLsQhEEAJA2xpYl9oLsPtLpciQAck1LZ7+eem2/JOl/X32xvF6PuwEBSAt0ggAAACddN6tGAZ9XfzhwUlv3nXA7nKxBEQQAkDY+PWW0JOlfGvfrWFe/y9EAyCWP/eYdDUTjmju+TH928Si3wwGQJkoKApKk9j72lQEAAPtVFof0V3PGSpK+v+Vdl6PJHhRBAABp49oZ1ZpVW6recEyP/vodt8MBkCN+8oeDeub3TZKk/73oYnk8dIEASDjVCRJ1ORIAAJArvnblRHk90pa3W/XmkQ63w8kKFEEAAGnD4/HonmsvkSRt2NqkP7V0uRwRgGz35O/26ls/fV1xI31x/jhdNnGk2yEBSCPJnSDvHutWNBZ3ORoAAJALxo8coT+fWSNJ+v6W91yOJjtQBAEApJVPTijX/5xWpbiRVm3c43Y4ALKUMUZrfvG2Hngp8e/MLQsv0Hevn+5yVADSzbwLypWf59Oeo51a+eKbMsa4HRIAAMgBX/+zSZKkn+86qn1tPS5Hk/koggAA0s4dn5siv9ejl99u1SvvtLkdDtLQ2rVrNWHCBIVCIc2fP19bt279yOt/8pOfaMqUKQqFQpoxY4Y2btzoUKRIRyd7wrrr+Tf0vZcTM3a/tehi3XXNJYzBAvAhteUF+r//a7Y8HumZ3zfpB7/d63ZIAAAgB1xSXaxPTxmtuJH+6T/pBjlfFEEAAGnngooR+pvLxkuSH
nhpt2Jx3nWJU5599lnV19eroaFB27dv16xZs7Ro0SIdO3bstNe/9tpruummm/SlL31JO3bs0PXXX6/rr79eb7zxhsORw03xuNHv3mnV8me2a/6Dm/XjrU3yeKQHrp+u2666kAIIgDO6elqV7rl2qiRp1c/f0sZdR12OCAAA5ILbrkp0g/zb9kM62tHncjSZzWMyoJ+3s7NTJSUl6ujoUHFxsdvhAAAccLInrP/xf15WV39U186s1tzxZZo0qlAXji5UaUGevB6PPB7Jo8SvcWNkjHSmR7V0eX0zlOez7L5y9fFx/vz5+uQnP6nvfe97kqR4PK7a2lp94xvf0J133vmh6xcvXqyenh797Gc/S9122WWXafbs2XriiSfO6ntafdatXQPq6Auf9/1kquTfU/Ohz4f+BU7+/fYosTPI65F8Xs+pv/8ez+DvSXEj9Udigx9xdfVHdPBEr/Yf79WB4z1680injnb0p+57+phi/d1nJqtuaqXtPy+AzGeM0X0vvqmnGg8o6PfqkRtn64KKESopyFNxyC+/16vecFS94Zh6wlENROLyeDQkX/F6B3/1uJuXXDi6yLL7ytVcxA2cNQDkphv/qVFb953QJ8aV6s8mj9bFVYW6uKpYo4uCqTwj+evHvS7yQW7lI0G/17I3oZ3t46Pfku8GAIDFykYE9M3PXKQHXtqjl14/qpdez/x3XZYW5GnnyqvdDiOjhcNhbdu2TStWrEjd5vV6VVdXp8bGxtN+TWNjo+rr64fctmjRIr3wwgtn/D4DAwMaGBhIfd7Z2Xl+gX/Ak7/bq39ipIrjikJ+/cUnxujGubWaPqbE7XAAZBCPx6OV103ToZN92vzWMd32zHa3QzonPq9H7z14jdthAACAs/SNT1+oJT/cqh1N7drR1O52OJbY/Z1FKgg4W5agCAIASFtf+tQFGj9yhHY0ndS7x7r1Xmu3DhzvVZTxWDmrra1NsVhMlZVD371fWVmpt95667Rf09zcfNrrm5ubz/h9Vq1apW9/+9vnH/AZhPJ8KivIs+3+3XSmv50ffJ9P8p0/ntTnp7kvk7g/Y4ziJvHOpnj81H+bwW9oZOSRR8E8r0J5PoXyvBoR8Ku2vEATRhZo/MgRuqBihOaML7O0GwtAbvF5PfrHmz6he194QzsPtauzL6rOvojCsXjqmvw8nwoCPgX93sF/vxL/RsUH35WZ+Pcs8fkH/91z4s2YPm+atMYCAICzsvCiUfrp1xZoe9NJvd3crbdbOvVOS7cGovGP/2KkUAQBAKQtj8ejz06t1GffN64mGosrHIufehE0nnhxwetNjMVJtoG+3wdbQSmh4OOsWLFiSPdIZ2enamtrLbv/2z87Wbd/drJl9wcAcMaIoF+PLJ495Lb+SEzRuFFBnk9eigwAAMBicyeUa+6E8tTnsbjRQDQ2+CYLk/r1fF8XcSqLyXfhjWkUQQAAGcXv88rv87odBlxSUVEhn8+nlpaWIbe3tLSoqqrqtF9TVVU1rOslKRgMKhgMnn/AAICsR4cZAABwks/rcXycVKbjVSQAAJAxAoGA5syZo82bN6dui8fj2rx5sxYsWHDar1mwYMGQ6yXpV7/61RmvBwAAAAAA2YOSEQAAyCj19fVaunSp5s6dq3nz5unRRx9VT0+Pli1bJkm6+eabNWbMGK1atUqS9M1vflNXXnml/uEf/kHXXnutNmzYoD/84Q/6wQ9+4OaPAQAAAAAAHEARBAAAZJTFixertbVVK1euVHNzs2bPnq1Nmzallp83NTXJ6z3V7Hr55ZfrmWee0T333KO77rpLF110kV544QVNnz7drR8BAAAAAAA4xGPMB9eipJ/Ozk6VlJSoo6NDxcXFbocDAEBa4PHROZw1AAAfxuOjczhrAAA+7GwfH9kJAgAAAAAAAAAAshJFEAAAAAAAAAAAkJUoggAAAAAAAAAAgKxEEQQAAAAAAAAAAGQliiAAAAAAAAAAACArUQQBAAAAAAAAAABZiSIIAAAAAAAAAADIShRBAAAAAAAAAABAVqIIAgAAAAAAAAAAshJFEAAAAAAAAAAAkJUoggAAAAAAAAAAgKxEEQQAAAAAAAAAAGQliiAAAAAAAAAAACArUQQBAAAAAAAAAABZiSIIAAAAAAAAAADIShRBAAAAAAAAAABAVqIIAgAAAAAAAAAAshJFEAAAAAAAAAAAkJX8bgdwNowxkqTOzk6XIwEAIH0kHxeTj5OwD7kIAAAfRi7iHHIRAAA+7GxzkYwognR1dUmSamtrXY4EAID009XVpZKSErfDyGrkIgAAnBm5iP3IRQAAOLOPy0U8JgPeshGPx3XkyBEVFRXJ4/Gc8/10dnaqtrZWBw8eVHFxsYUR5ibO01qcp7U4T+twltay8jyNMerq6lJNTY28XiZc2olcJD1xntbiPK3FeVqHs7QWuUhmIhdJT5yntThPa3Ge1uEsreVGLpIRnSBer1djx4617P6Ki4v5A2shztNanKe1OE/rcJbWsuo8edelM8hF0hvnaS3O01qcp3U4S2uRi2QWcpH0xnlai/O0FudpHc7SWk7mIrxVAwAAAAAAAAAAZCWKIAAAAAAAAAAAICvlVBEkGAyqoaFBwWDQ7VCyAudpLc7TWpyndThLa3GeuY3//9biPK3FeVqL87QOZ2ktzjO38f/fWpyntThPa3Ge1uEsreXGeWbEYnQAAAAAAAAAAIDhyqlOEAAAAAAAAAAAkDsoggAAAAAAAAAAgKxEEQQAAAAAAAAAAGQliiAAAAAAAAAAACArZV0RZO3atZowYYJCoZDmz5+vrVu3fuT1P/nJTzRlyhSFQiHNmDFDGzdudCjSzDCc81y3bp0WLlyosrIylZWVqa6u7mPPP9cM989n0oYNG+TxeHT99dfbG2AGGe5Ztre367bbblN1dbWCwaAmT57M3/f3Ge55Pvroo7r44ouVn5+v2tpa3X777erv73co2vT229/+Vtddd51qamrk8Xj0wgsvfOzXbNmyRZdeeqmCwaAuvPBC/ehHP7I9TtiHXMRa5CLWIhexDrmItchFrEMuAnIRa5GLWItcxFrkI9YhF7FOWuYiJots2LDBBAIBs379evPmm2+aW265xZSWlpqWlpbTXv/qq68an89nHn74YbN7925zzz33mLy8PLNr1y6HI09Pwz3PL37xi2bt2rVmx44dZs+ePeZv//ZvTUlJiTl06JDDkaen4Z5n0r59+8yYMWPMwoULzRe+8AVngk1zwz3LgYEBM3fuXHPNNdeYV155xezbt89s2bLF7Ny50+HI09Nwz/Ppp582wWDQPP3002bfvn3mF7/4hamurja33367w5Gnp40bN5q7777bPPfcc0aSef755z/y+r1795qCggJTX19vdu/ebR577DHj8/nMpk2bnAkYliIXsRa5iLXIRaxDLmItchFrkYvkNnIRa5GLWItcxFrkI9YhF7FWOuYiWVUEmTdvnrnttttSn8diMVNTU2NWrVp12utvvPFGc+211w65bf78+earX/2qrXFmiuGe5wdFo1FTVFRknnrqKbtCzCjncp7R
aNRcfvnl5sknnzRLly7lwX7QcM/y8ccfNxMnTjThcNipEDPKcM/ztttuM5/+9KeH3FZfX2+uuOIKW+PMRGfzYP/3f//3Ztq0aUNuW7x4sVm0aJGNkcEu5CLWIhexFrmIdchFrEUuYh9ykdxDLmItchFrkYtYi3zEOuQi9kmXXCRrxmGFw2Ft27ZNdXV1qdu8Xq/q6urU2Nh42q9pbGwccr0kLVq06IzX55JzOc8P6u3tVSQSUXl5uV1hZoxzPc/vfOc7Gj16tL70pS85EWZGOJezfPHFF7VgwQLddtttqqys1PTp0/Xggw8qFos5FXbaOpfzvPzyy7Vt27ZUa+jevXu1ceNGXXPNNY7EnG14LMoe5CLWIhexFrmIdchFrEUu4j4ei7IHuYi1yEWsRS5iLfIR65CLuM+JxyK/Zffksra2NsViMVVWVg65vbKyUm+99dZpv6a5ufm01zc3N9sWZ6Y4l/P8oDvuuEM1NTUf+kOci87lPF955RX98Ic/1M6dOx2IMHOcy1nu3btXv/nNb/TXf/3X2rhxo959913deuutikQiamhocCLstHUu5/nFL35RbW1t+tSnPiVjjKLRqL72ta/prrvuciLkrHOmx6LOzk719fUpPz/fpcgwXOQi1iIXsRa5iHXIRaxFLuI+cpHsQS5iLXIRa5GLWIt8xDrkIu5zIhfJmk4QpJfVq1drw4YNev755xUKhdwOJ+N0dXVpyZIlWrdunSoqKtwOJ+PF43GNHj1aP/jBDzRnzhwtXrxYd999t5544gm3Q8tIW7Zs0YMPPqjvf//72r59u5577jm99NJLuv/++90ODQBSyEXOD7mItchFrEUuAiATkIucH3IR65GPWIdcJPNkTSdIRUWFfD6fWlpahtze0tKiqqqq035NVVXVsK7PJedynklr1qzR6tWr9etf/1ozZ860M8yMMdzzfO+997R//35dd911qdvi8bgkye/36+2339akSZPsDTpNncufzerqauXl5cnn86Vuu+SSS9Tc3KxwOKxAIGBrzOnsXM7z3nvv1ZIlS/TlL39ZkjRjxgz19PToK1/5iu6++255vdTXh+NMj0XFxcW88zLDkItYi1zEWuQi1iEXsRa5iPvIRbIHuYi1yEWsRS5iLfIR65CLuM+JXCRr/o8EAgHNmTNHmzdvTt0Wj8e1efNmLViw4LRfs2DBgiHXS9KvfvWrM16fS87lPCXp4Ycf1v33369NmzZp7ty5ToSaEYZ7nlOmTNGuXbu0c+fO1MfnP/95XXXVVdq5c6dqa2udDD+tnMufzSuuuELvvvtuKmGSpD/96U+qrq7O2Qf5pHM5z97e3g89oCeTqMTOKwwHj0XZg1zEWuQi1iIXsQ65iLXIRdzHY1H2IBexFrmItchFrEU+Yh1yEfc58lhk2Yr1NLBhwwYTDAbNj370I7N7927zla98xZSWlprm5mZjjDFLliwxd955Z+r6V1991fj9frNmzRqzZ88e09DQYPLy8syuXbvc+hHSynDPc/Xq1SYQCJif/vSn5ujRo6mPrq4ut36EtDLc8/ygpUuXmi984QsORZvehnuWTU1NpqioyCxfvty8/fbb5mc/+5kZPXq0eeCBB9z6EdLKcM+zoaHBFBUVmR//+Mdm79695pe//KWZNGmSufHGG936EdJKV1eX2bFjh9mxY4eRZB555BGzY8cOc+DAAWOMMXfeeadZsmRJ6vq9e/eagoIC861vfcvs2bPHrF271vh8PrNp0ya3fgScB3IRa5GLWItcxDrkItYiF7EWuUhuIxexFrmItchFrEU+Yh1yEWulYy6SVUUQY4x57LHHzLhx40wgEDDz5s0z//Vf/5X6vSuvvNIsXbp0yPX/+q//aiZPnmwCgYCZNm2aeemllxyOOL0N5zzHjx9vJH3oo6GhwfnA09Rw/3y+Hw/2Qw33LF977TUzf/58EwwGzcSJE813v/tdE41GHY46fQ3nPCORiLnvvvvMpEmTTCgUMrW1tebWW281J0+edD7wNPTyyy+f9t/C5BkuXbrUXHnllR/6mtmzZ5tAIGAmTpxo/vmf/9nxuGEdchFrkYtYi1zEOuQi1iIXsQ65CMhFrEUuYi1yEWuRj1iHXMQ66ZiLeIyhRwcAAAAAAAAAAGSfrNkJAgAAAAAAAAAA8H4UQQAAAAAAAAAAQFaiCAIAAAAAAAAAALISRRAAAAAAAAAAAJCVKIIAAAAAAAAAAICsRBEEAAAAAAAAAABkJYogAAAAAAAAAAAgK1EEAQAAAAAAAAAAWYkiCAAAAAAAAAAAyEoUQQAAAAAAAAAAQFaiCAIAAAAAAAAAALISRRAAAAAAAAAAAJCV/j9ru8hd/78s9AAAAABJRU5ErkJggg==",
"text/plain": [
"
"
]
@@ -215,7 +271,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.6 (main, Nov 14 2022, 16:10:14) [GCC 11.3.0]"
+ "version": "3.9.16"
},
"orig_nbformat": 4,
"vscode": {
diff --git a/MindFlow/applications/cfd/sod/sod_tube_CN.ipynb b/MindFlow/applications/cfd/sod/sod_tube_CN.ipynb
index 8d0d9a88be64639270dfaea330960a938bde4d40..5b64f494b01b50fd743de4416a9f7b2ca7b7d416 100644
--- a/MindFlow/applications/cfd/sod/sod_tube_CN.ipynb
+++ b/MindFlow/applications/cfd/sod/sod_tube_CN.ipynb
@@ -55,9 +55,7 @@
"from mindflow.cfd.runtime import RunTime\n",
"from mindflow.cfd.simulator import Simulator\n",
"\n",
- "from src.ic import sod_ic_1d\n",
- "\n",
- "context.set_context(device_target=\"GPU\", device_id=3)"
+ "from src.ic import sod_ic_1d"
]
},
{
@@ -65,9 +63,17 @@
"cell_type": "markdown",
"metadata": {},
"source": [
- "## 定义Simulator和RunTime\n",
+ "## 设置 MindSpore 运行环境\n",
+ "\n",
+ "在运行程序之前,应配置context。`context.set_context`里面常用参数表示如下:\n",
+ "\n",
+ "`mode`表示运行的模式,'GRAPH'表示静态图模式, 'PYNATIVE'表示动态图模式,详见[MindSpore 官网](https://www.mindspore.cn/docs/zh-CN/r2.0/design/dynamic_graph_and_static_graph.html),默认值'GRAPH';\n",
"\n",
- "网格、材料、仿真时间、边界条件和数值方法的设置在文件[numeric.yaml](https://gitee.com/mindspore/mindscience/blob/master/MindFlow/applications/cfd/sod/numeric.yaml)中。"
+ "`save_graphs`表示是否保存计算图,默认值'False';\n",
+ "\n",
+ "`device_target`表示使用的计算平台类型,可以选择'Ascend'或'GPU',默认值'GPU';\n",
+ "\n",
+ "`device_id`表示使用的计算卡编号,可按照实际情况填写,默认值 0;"
]
},
{
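The markdown cell added above documents the `context.set_context` parameters that the next hunk then sets to static-graph mode on GPU card 0. A minimal sketch of how those parameters combine is given here, assuming MindSpore is installed; the commented PYNATIVE/Ascend variant is an illustrative alternative, not what the notebook itself uses.

from mindspore import context

# Static graph mode on GPU card 0, matching the cell added in the hunk below.
context.set_context(mode=context.GRAPH_MODE, save_graphs=False,
                    device_target="GPU", device_id=0)

# Illustrative alternative (assumption): dynamic graph mode on an Ascend card.
# context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend", device_id=0)
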
@@ -75,8 +81,47 @@
"execution_count": 2,
"metadata": {},
"outputs": [],
+ "source": [
+ "context.set_context(mode=context.GRAPH_MODE, save_graphs=False, device_target=\"GPU\", device_id=0)"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 读取配置文件\n",
+ "\n",
+ "本程序提供了多种选项来配置网格、材料、仿真时间、边界条件和数值方法。这些配置可以在[numeric.yaml](./numeric.yaml)文件中进行设置。用户可以根据自己的需求选择不同的数值方法。本程序支持以下数值方法:WENO3、WENO5和WENO7三种重构格式,以及Rsuanov、HLLC和Roe三种Riemann求解器。\n",
+ "\n",
+ "除了在配置文件中直接进行设置外,还可以通过修改以下代码来选择要使用的数值方法。在下面的代码块中,第二和第三行是设置数值方法的位置。如果希望直接在配置文件中指定数值方法,请将这两行代码注释掉。"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [],
"source": [
"config = load_yaml_config('numeric.yaml')\n",
+ "config[\"space_solver\"][\"convective_flux\"][\"reconstructor\"] = \"WENO5\"\n",
+ "config[\"space_solver\"][\"convective_flux\"][\"riemann_computer\"] = \"Roe\""
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 定义Simulator和RunTime"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
"simulator = Simulator(config)\n",
"runtime = RunTime(config['runtime'], simulator.mesh_info, simulator.material)"
]
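Taken together, the cells added in this hunk override the numerical methods through the nested config dictionary and then construct the solver objects. Below is a minimal standalone sketch of that flow; it assumes `load_yaml_config` is importable from `mindflow.utils` (as in other MindFlow examples) and that `numeric.yaml` sits in the working directory.

from mindflow.utils import load_yaml_config
from mindflow.cfd.runtime import RunTime
from mindflow.cfd.simulator import Simulator

# Load the YAML configuration and override the numerical methods in code.
# Comment out the two override lines to keep the values from numeric.yaml.
config = load_yaml_config('numeric.yaml')
config["space_solver"]["convective_flux"]["reconstructor"] = "WENO5"    # WENO3 / WENO5 / WENO7
config["space_solver"]["convective_flux"]["riemann_computer"] = "Roe"   # Rusanov / HLLC / Roe

# Build the simulator and runtime exactly as the notebook cells do.
simulator = Simulator(config)
runtime = RunTime(config['runtime'], simulator.mesh_info, simulator.material)
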
@@ -93,7 +138,7 @@
},
{
"cell_type": "code",
- "execution_count": 3,
+ "execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -114,7 +159,7 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 6,
"metadata": {},
"outputs": [
{
@@ -122,40 +167,53 @@
"output_type": "stream",
"text": [
"current time = 0.000000, time step = 0.007606\n",
- "current time = 0.007606, time step = 0.005488\n",
- "current time = 0.013094, time step = 0.004744\n",
- "current time = 0.017838, time step = 0.004501\n",
- "current time = 0.022339, time step = 0.004338\n",
- "current time = 0.026678, time step = 0.004293\n",
- "current time = 0.030971, time step = 0.004268\n",
- "current time = 0.035239, time step = 0.004198\n",
- "current time = 0.039436, time step = 0.004157\n",
- "current time = 0.043593, time step = 0.004150\n",
- "current time = 0.047742, time step = 0.004075\n",
- "current time = 0.051818, time step = 0.004087\n",
- "current time = 0.055905, time step = 0.004056\n",
- "current time = 0.059962, time step = 0.004031\n",
- "current time = 0.063993, time step = 0.004021\n",
- "current time = 0.068014, time step = 0.004048\n",
- "current time = 0.072062, time step = 0.004039\n",
- "current time = 0.076101, time step = 0.004016\n",
- "current time = 0.080117, time step = 0.004049\n",
- "current time = 0.084166, time step = 0.004053\n",
- "current time = 0.088218, time step = 0.004045\n",
- "current time = 0.092264, time step = 0.004053\n",
- "current time = 0.096317, time step = 0.004062\n",
- "current time = 0.100378, time step = 0.004065\n",
- "current time = 0.104443, time step = 0.004068\n",
- "current time = 0.108511, time step = 0.004072\n",
- "current time = 0.112583, time step = 0.004075\n",
- "current time = 0.116658, time step = 0.004077\n",
- "current time = 0.120735, time step = 0.004080\n",
- "current time = 0.124815, time step = 0.004081\n",
- "...\n",
- "current time = 0.186054, time step = 0.004084\n",
- "current time = 0.190138, time step = 0.004084\n",
- "current time = 0.194222, time step = 0.004084\n",
- "current time = 0.198306, time step = 0.004085\n"
+ "current time = 0.007606, time step = 0.004957\n",
+ "current time = 0.012564, time step = 0.004426\n",
+ "current time = 0.016990, time step = 0.004285\n",
+ "current time = 0.021274, time step = 0.004200\n",
+ "current time = 0.025474, time step = 0.004197\n",
+ "current time = 0.029671, time step = 0.004117\n",
+ "current time = 0.033787, time step = 0.004086\n",
+ "current time = 0.037874, time step = 0.004124\n",
+ "current time = 0.041998, time step = 0.004134\n",
+ "current time = 0.046131, time step = 0.004123\n",
+ "current time = 0.050254, time step = 0.004096\n",
+ "current time = 0.054350, time step = 0.004096\n",
+ "current time = 0.058445, time step = 0.004082\n",
+ "current time = 0.062528, time step = 0.004082\n",
+ "current time = 0.066610, time step = 0.004079\n",
+ "current time = 0.070689, time step = 0.004071\n",
+ "current time = 0.074761, time step = 0.004079\n",
+ "current time = 0.078840, time step = 0.004079\n",
+ "current time = 0.082919, time step = 0.004084\n",
+ "current time = 0.087003, time step = 0.004088\n",
+ "current time = 0.091090, time step = 0.004094\n",
+ "current time = 0.095184, time step = 0.004100\n",
+ "current time = 0.099284, time step = 0.004103\n",
+ "current time = 0.103388, time step = 0.004096\n",
+ "current time = 0.107484, time step = 0.004105\n",
+ "current time = 0.111589, time step = 0.004106\n",
+ "current time = 0.115696, time step = 0.004097\n",
+ "current time = 0.119793, time step = 0.004090\n",
+ "current time = 0.123882, time step = 0.004087\n",
+ "current time = 0.127969, time step = 0.004080\n",
+ "current time = 0.132049, time step = 0.004078\n",
+ "current time = 0.136127, time step = 0.004072\n",
+ "current time = 0.140199, time step = 0.004074\n",
+ "current time = 0.144273, time step = 0.004074\n",
+ "current time = 0.148347, time step = 0.004077\n",
+ "current time = 0.152423, time step = 0.004077\n",
+ "current time = 0.156501, time step = 0.004077\n",
+ "current time = 0.160578, time step = 0.004083\n",
+ "current time = 0.164661, time step = 0.004085\n",
+ "current time = 0.168746, time step = 0.004088\n",
+ "current time = 0.172834, time step = 0.004091\n",
+ "current time = 0.176924, time step = 0.004091\n",
+ "current time = 0.181015, time step = 0.004092\n",
+ "current time = 0.185107, time step = 0.004090\n",
+ "current time = 0.189198, time step = 0.004088\n",
+ "current time = 0.193285, time step = 0.004090\n",
+ "current time = 0.197375, time step = 0.004090\n"
]
}
],
@@ -179,18 +237,26 @@
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAABkEAAAJtCAYAAACBs9diAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAA9hAAAPYQGoP6dpAACrx0lEQVR4nOz9eXzV933m/V/fs2qXQEILQuw7AoFxjGXsxE6ISZw4S5fxHad2h0nciWumbpjcTWhie7KZTpt4cv8yTpnQeOLeTSZOfSeOG7vOQky84Q0MZhH7IiHQjnbprN/fH2cRBAGS0Dmfs7yej4dKLOvAWzQBHV3n/b4s27ZtAQAAAAAAAAAAZBiH6QEAAAAAAAAAAAASgRAEAAAAAAAAAABkJEIQAAAAAAAAAACQkQhBAAAAAAAAAABARiIEAQAAAAAAAAAAGYkQBAAAAAAAAAAAZCRCEAAAAAAAAAAAkJEIQQAAAAAAAAAAQEZymR5gLMLhsM6ePavCwkJZlmV6HAAAUoJt2+rr69P06dPlcPC6hkTiaxEAAC7F1yLJw9ciAABcaqxfi6RFCHL27FnV1NSYHgMAgJTU1NSkGTNmmB4jo/G1CAAAl8fXIonH1yIAAFze1b4WSYsQpLCwUFLkkykqKjI8DQAAqaG3t1c1NTXxvyeROHwtAgDApfhaJHn4WgQAgEuN9WuRtAhBYqueRUVF/GUPAMAf4CRC4vG1CAAAl8fXIonH1yIAAFze1b4W4WgnAAAAAAAAAADISIQgAAAAAAAAAAAgIxGCAAAAAAAAAACAjEQIAgAAAAAAAAAAMhIhCAAAAAAAAAAAyEiEIAAAAAAAAAAAICMRggAAAAAAAAAAgIxECAIAAAAAAAAAADISIQgAAAAAAAAAAMhIhCAAAAAAAAAAACAjEYIAAAAAAAAAAICMRAgCAAAAAAAAAAAyEiEIAAAAAAAAAADISIQgAAAAAAAAAAAgIxGCAAAAAAAAAACAjEQIAgAAAAAAAAAAMhIhCAAAAAAAAAAAyEiEIAAAAAAAAAAAICMRggAAAAAAAAAAgIw07hDkpZde0p133qnp06fLsiw988wzV33Mjh07dN1118nr9Wr+/Pn64Q9/OIFRAQAAAAAAzOL7IgAApJdxhyADAwOqq6vT448/PqaPP3nypD7ykY/otttu0549e/TXf/3X+uxnP6tf/epX4x4WAAAAAADAJL4vAgBAenGN9wEf/vCH9eEPf3jMH79161bNmTNH3/72tyVJS5Ys0SuvvKL/8T/+h9avXz/eXx6G2batUNiWJFmWJYcV+REAAADIRuGwLYeDr4eBbML3RbKbbdsK25EfHZYli++LAEDKG3cIMl47d+7UunXrLnrf+vXr9dd//deXfYzP55PP54v/c29v76TO9PAv9uupt5om9edMN7G/ny1ZF/2zJNm2ZMuO/Bj9z2Fb8fBjNG6npcIctwpzXCrMcWlKnkdLq4pUW12s2upizZqax5NDAAAApBV/MKzGrgEdaxvQ8fZ+HW/v17nuYZ0f9Kt7MKDuIb+GA2G5HJY8Loe8Loe8LqcKol8Tx74+9jodcjktuZwOuR2WbEmBUFiBkK1AKKxg2FYoZCtk2wqHbdmKfH3tcjrkcTrkdlrK87iU73Uqz+NSnscpr8spj8sReXM6JNmRnydsKxCy5QuGNOgLadAf0qA/qEF/SAP+oAZ9kR99wXB03sjMXnfs14q9WQqEbA0HQxr2hzQcDMkfDMsfshUMhRUIhePPD2LPEhyWJa/LoRy3U7lup3LcThV4Y78fbuV7XXI7LDksSw5H5AVVweisvmBYw4GQBvwh9Q0H1DccVN9wUL5gSO7Y74Mr8vvniP0cluR0ODRjSq7mlxdofnmBZk3Nk8tJ9SVSSyp+X+R//f64vvu7Y5P6c6YbK/5/Ij9cGGTYduTPYtuWwrYdeQtLoQteGHrRz2VJbqdDU/M8mprvUWmBR9MKvXrP7Km6ZUGZZkzJS8anBAC4jISHIC0tLaqoqLjofRUVFert7dXQ0JByc3MvecyWLVv01a9+NWEzRZ4UhBP282ejQMhW14BfXQP++PtePtoR/8+FOS796eoa3X/rPE0r9JoYEQAAAFnKHwzrWFu/GrsGFAhFvpkVigYGw8GwfIGQhgMhDQVCOtczrDNdQ2o6P6iW3mHZl38dUFwwbCvojwQOUkCa3O9VYhzcTks5bqcU/calLen9i8v1P+++zvRoyGKp+X2RsPp9wYT9/NnGtiN/17T0Dquldzj+/p/tbpYkzS3L1y0LynRP/SzNLy80NSYAZK2EhyATsXnzZm3atCn+z729vaqpqZm0n/+LH1qkje+fP2k/XzLZtn3Na5Z29Jnc1Z7QORxW9NUQktOy4uevHNFf39bIqyP8wXD01VqRV2219g5r/9ke7WvuVcO5XvUNB/XEqyf1f95s1L03zdJ/fu88Tc33XNPnAQAAAFzOi4fb9G97z6rhXJ+OtfUpEBpDmjGKfI9T88oLNG9ageZNy1fN1DxNyfOoJM+tKXke5XmcCoRs+YNh+YIhDQfC6vONbDL0Dwfkj259BEO2guGwLEVeMeyKblw4HZZc0Q0HV3R7OhDfuLDlD4U16A9qwBfZ6hjwR7cyYm+hyM/pdFhyOS05HZHNiQKvU3lel/I9TuV6XCrwRn7M90S2SAKhsHyBcHwLIxiOfB6B6KaHy+mIbnREtjtimyIupyWP0xF/viBFXkEdCtvR34PI78OgP6QBX1D9vujvhS+gUDiyZR4LotxOR3wjJSe6PVJ0wRZNjtt50dZMIBSOvio78nP4AmGd7hzQsfZ+HW8b0FAgpEDo4m/s/vLdc/rmJwMqznVP/L9QQJIl+vsif3bjLH10xfRJ+/lMG+u3SWLfB7Hj/3zxRtuFHxf7/kfs5JXTEfnz2mFFfrSijwvbkUsavmBIXQN+dQ741dXvV2PXoF491qF3mrp1omNAJzoG9H/eatLfrF+k/7R2DtcyACCJEh6CVFZWqrW19aL3tba2qqioaNRXO0iS1+uV15u4bYGSPI9K2ERMmkAorFePdeh//Pao9jZ163/9/oT+ZedpfeWjS/WpG2aaHg8AAAAZpGvAr//27AE9u/fsRe8vzHFpfnmBvC7HRd/Ein/z3RX5Zn95UY5mTMlVzdQ81UzJU1mBh1vvaSIctnWud1i+QEiWFfkG5Se+96q6BwNqPj9ECAJjUvf7IrwwcbL94dmrz39woXqHA3rtWKd+9MZpvXy0Q994rkHbG9r0rf9Qp+qS0f//DwCYXAkPQerr6/X8889f9L7f/OY3qq+vT/QvjRThdjp066JyvW/hNL14uE2P/eaI9jf36m9/vk9T8z1av6zS9IgAAADIAM/vO6eHf7FfHf1+OSzpnhtnae38Mi2dXqTqklzCjAzncFiXfENxxpTcSAjSPaSl04sMTYZsx/dFsltRjlsfqq3U+mUV+tEbjfrmcw3aeaJTH/ofL+nv/2SFPry8yvSIAJDxxt0Y19/frz179mjPnj2SpJMnT2rPnj1qbGyUFFn
ZvPfee+Mf/7nPfU4nTpzQ3/zN3+jQoUP63ve+p5/+9Kf6/Oc/PzmfAdKGZVl6/+IK/dvGm/VnN86UbUsP/uQdvXum2/RoAAAASGO2beu//nSv/vJHu9XR79eC8gL97C/X6qsfr9Xtyyo1Y0oeAUiWioUizecHDU+CTML3RTARlmXpz26cpecfvEWrZpaozxfUg0/t0cmOAdOjAUDGG3cI8vbbb2vVqlVatWqVJGnTpk1atWqVHn74YUnSuXPn4n/xS9KcOXP03HPP6Te/+Y3q6ur07W9/W//0T/+k9evXT9KngHRjWZb+253L9L6F0zQcCOszT76t5u4h02MBAAAgTf1sd7P+v91n5HRY2njbfP3yr27WypoS02MhBVRH7yDzfAOTie+L4FrMKcvXv/7net2yoEz+YFhf/vm+eDcJACAxLDsN/qTt7e1VcXGxenp6VFTECnOm6BsO6E+37tShlj4trizUv36uXoU53OkFgLHi78fk4fcaSF09gwG9/9s71Dng1xc/tFj33zrP9EhIIT945aS+/suDumN5pb736dWmx8k4/P2YPPxeZ57GzkHd/p3fazgQ1rf+tE5/snqG6ZEAIO2M9e/HcW+CAJOlMMetH/zH92haoVeHWvr0+af2mh4JAAAAaeZbvz6szgG/5pcX6DM3zzE9DlLMyDksNkEApJaZpXn663ULJUnfeO6gOvt9hicCgMxFCAKjqkty9YM/v15up6XfNrRq1+ku0yMBAAAgTbx7plv/8sZpSdLXP14rj4unN7jYjCnREIRzWABS0GdunqMlVUXqHgzoG881mB4HADIWzxJg3IoZJfrj6yJrn/+447jhaQAAAJAOQmFbX3lmv2xb+uSqatXPKzU9ElJQbBOko9+v4UDI8DQAcDG306G/+6Plsizp5+8066Uj7aZHAoCMRAiClPAX750ry5J+29Cmwy19pscBAABAivvxm41690yPCr0ubb5jselxkKJK8tzK8zglsQ0CIDXV1ZToP940W5L0lWf2KxAKmx0IADIQIQhSwtxpBbqjtkqStPX3bIMAAADg8s4P+PUPLxySJH1h/SKVF+YYngipyrIsekEApLz/evsiTc33qLFrUK8c6zA9DgBkHEIQpIz7b50nSXp271k1dQ0angYAAACp6t/ePave4aAWVRTqz26cZXocpLhqekEApLgCr0sfXRF5Yeize84angYAMg8hCFJGbXWxbllQplDY1raXT5geBwAAACnqmXeaJUn/4T01cjosw9Mg1bEJAiAdfHzldEnSrw+0aMhPhxEATCZCEKSU2DbIU281qb3PZ3gaAAAApJrGzkHtbuyWZUl3Rl81C1wJmyAA0sF1M6eouiRXA/6QfneozfQ4AJBRCEGQUurnlqqupkS+YFj/+9WTpscBAABAivm3dyNnQm6aV6ryIrpAcHVsggBIB5Zl6WPRbZBf7Gk2PA0AZBZCEKQUy7L0l9FtkP9352kN+IKGJwIAAECqsG07fgrr4yurDU+DdDGDTRAAaeJjdZEQZMfhdvUMBQxPAwCZgxAEKeeDSyo0uzRPfb6gftvQanocAAAApIiGc3062tYvj8uhD9VWmh4HaaK6JE+S1NI7rGAobHgaALi8xZWFWlBeIH8orF8daDE9DgBkDEIQpByHw9Kd0Vc/PPfuOcPTAAAAIFX8Ym9kC+T9i8pVlOM2PA3SRXmhV26npVDYVkvvsOlxAOCyLMuKF6Q/u+es4WkAIHMQgiAlfSRacrnjSLv6hlkBBQAAyHbhsK1/i35DKPYNImAsHA5L0+kFAZAmYi8Kfe14h9r6CG4BYDIQgiAlLaoo1Lxp+fIHw5zEAgAAgN461aWzPcMq9Lp02+Jy0+MgzcTL0ekFAZDiZpXmq66mRGFbep7rGAAwKQhBkJIsy9JHV3ASCwAAABG/2BvZAvlQbaVy3E7D0yDdVLMJAiCNfDy6DRL7uw8AcG0IQZCyYiexfn+kXT1DnMQCAADIVv5gWM/vi7ww5uMrqw1Pg3RUPYVNEADp46MrquSwpHcau9XUNWh6HABIe4QgSFkLKwq1sKJAgZCt3xzkJBYAAEC2euVYu7oHA5pW6FX9vFLT4yANcQ4LQDopL8rR9bOnSpJePtpheBoASH+EIEhpIyexWAEFAADIVi8eapckfWhZpZwOy/A0SEfxTRDOYQFIEzdFQ/+dJzoNTwIA6Y8QBCntjuWRk1gvH+1Q96Df8DQAAAAw4ZVjkVfB3rKgzPAkSFczSvIkRTZBbNs2PA0AXN2NcyMhyOsnOvlzCwCuESEIUtr88gItrixUMGzr1wc4iQUAAJBtzpwf1MmOATkdlm7kFBYmqLI4R5Yl+YJhdfTz4ioAqW9lTYk8Lofa+3w60TFgehwASGuEIEh5d9ZFTmL9MlqGCQAAgOzxanQLpG5GsYpy3IanQbryuByqKMyRRC8IgPSQ43Zq9cwpkqSdxzmJBQDXghAEKS92EuvVYx3qGuBVWwAAANkkVgh784JphidBuqMXBEC6ufAkFgBg4ghBkPLmlOVr2fQihcK2ftvASSwAAIBsEQ7bei366lf6QHCtqkuiIUj3oOFJAGBs6ufFQpAuekEA4BoQgiAtfGBJhSTp94fbDU8CAACAZDl4rlddA37le5xaWVNiehykOTZBAKSbuppieV0OdfT7dLy93/Q4AJC2CEGQFm5dFDl/8PLRdgVDYcPTAAAAIBleifaB3Di3VG4nT11wbUY2QQhBAKQHr8up62fTCwIA14pnEkgLdTNKVJLnVu9wUHuauk2PAwAAgCSIlaLfzCksTILYJsgZNkEApJEb54ycxAIATAwhCNKC02HplmgZ5g5OYgEAAGS84UBIb56MfMPn5vmEILh2M9gEAZCGRnpBOukFAYAJIgRB2rh1YTQEOdJmeBIAAAAk2tunzssXDKuiyKv55QWmx0EGiG2C9A0H1TscMDwNAIzNihklynU71Tng19E2ekEAYCIIQZA23hsNQfY396qtb9jwNAAAAEikWB/IzfOnybIsw9MgE+R5XCrJc0uSznXzfAJAevC4HPSCAMA1IgRB2phW6NXy6mJJ0ktHOgxPAwAAgER65VjkBOrNC0oNT4JMku9xSZKGAiHDkwDA2N04d+QkFgBg/AhBkFZuXRTrBeEkFgAAQKbqGvDrwNleSdJa+kAwibzuyFNgfzBseBIAGLsb506VFAlBwmF6QQBgvAhBkFZiIcjLRzsUDPHEBQAAIBO9drxDti0trixUeWGO6XGQQTxOQhAA6SfWC3J+MKAjbX2mxwGAtEMIgrSysmaKinPd6hkKaO+ZbtPjAAAAIAFePRY598EWCCab1xUNQUKcwwKQPtzOkV6Q1+kFAYBxIwRBWnE6LN2yIPJkeMfhdsPTAAAAIBHeOtUlaeQGOjBZPC42QQCkp+tnRU5i7WnqNjsIAKQhQhCknVsXlUsiBAEAAMhEXQN+HWvrlyStnjXF8DTINLEQxEcIAiDNrJxZIokQBAAmghAEaee9CyObIPuae9Te5zM8DQAAACbTrtPnJUnzyws0Nd9jeBpkml
gnCCEIgHSzckaJJOlU56DOD/jNDgMAaYYQBGmnvDBHy6YXSZJeOsI2CAAAQCaJncJ6z+yphidBJuIcFoB0VZzn1tyyfEnSHjpSAWBcCEGQlm5dNE2S9MqxDsOTAAAAYDKNhCCcwsLk87ickghBAKSnlTUlkqQ9jd1G5wCAdEMIgrS0dn7kJNYrxzpk27bhaQAAADAZhvwh7W/ukcQmCBIjdg7LHyIEAZB+6AUBgIkhBEFaum7mFHldDrX3+eLFmQAAAEhve890KxCyVVHk1YwpuabHQQbiHBaAdBbbBNl7ppsXhALAOBCCIC3luJ26YU7k1YGcxAIAAMgMb50c6QOxLMvwNMhEXkIQAGlscWWRPC6HugcDOtU5aHocAEgbhCBIW7GTWK8e6zQ8CQAAACbDW6fPS+IUFhInvgnCOSwAacjjcqh2epEkaU/TecPTAED6IARB2lo7LxKCvH6iU0GexAAAAKS1UNjW7mgIcj2l6EiQeCcImyAA0tTKmsjfkZSjA8DYEYIgbS2dXqSSPLf6fUHtPdNjehwAAABcg4Zzver3BVXodWlxZZHpcZChYuewfIQgANIU5egAMH6EIEhbToel+rmlkqTX6AUBAABIa2+fivSBXDdripwO+kCQGBSjA0h3q6Ll6AfP9Wo4EDI7DACkCUIQpLVYLwjl6AAAAOltpA+EU1hIHDpBAKS7GVNyVZrvUSBk6+C5XtPjAEBaIARBWrs5GoK809itQX/Q8DQAAACYCNu245sg11OKjgQa2QTh1dMA0pNlWVoZ3QahFwQAxoYQBGltVmmeqkty5Q+F9dap86bHAQAAwAQ0dQ2ptdcnt3PkGztAIsSK0ekEAZDO4iEIvSAAMCaEIEhrlmVp7fxIL8irnMQCAABIS29Ft0CWVxcrx+00PA0yGZ0gADIB5egAMD6EIEh7sV4QQhAAAID09PbpSAjyHk5hIcG8hCAAMsCKGSWSpMauQXX2+8wOAwBpgBAEae+meZEQ5MDZXnUN+A1PAwAAgPGKnTVdPYtSdCQWxegAMkFxrltzp+VLkvae6TY7DACkAUIQpL1phV4triyUJO083ml4GgAAAIxHz2BAx9r6JUnXEYIgwTzOyLk1NkEApDvK0QFg7AhBkBFi2yCvcBILAAAgrbzTFNkCmVWap7ICr+FpkOnoBAGQKVZFQ5C9Z3rMDgIAaYAQBBkhVo6+8zghCAAAQDrZHX0F63Uz2QJB4sVCEB8hCIA0tzzaC7K/uUe2bZsdBgBSHCEIMsINc6bK6bB0qnNQzd1DpscBAADAGL3TGNkEuW5midlBkBU8TjpBAGSGxZWFcjksdQ74dbZn2PQ4AJDSCEGQEQpz3FpeXSyJXhAAAIB0EQ7b8Vvmq9gEQRJwDgtApshxO7WwItKPuo+TWABwRYQgyBg3zYucxHqNk1gAAABp4Whbv/p8QeV5nFpcWWh6HGQBLyEIgAwSezHo/mZCEAC4EkIQZIxYOfrrxzu5hwkAAJAGdkdPYa2YUSyXk6cmSLx4CMI5LAAZYPmMSAjyLiEIAFwRzzSQMVbPmiKP06GzPcM63TloehwAAABcxe7TsT4QTmEhOWLnsEJhW0GCEABp7sJNEF4MCgCXRwiCjJHrcWpltFDzNXpBAAAAUt7uRkIQJFcsBJHYBgGQ/hZXFcrttNQ14Fdz95DpcQAgZRGCIKPQCwIAAJAeegYDOt4+IElaFX0hC5BongvOrtELAiDdeV0j5ej0ggDA5RGCIKPEekF20gsCABnt8ccf1+zZs5WTk6M1a9bozTffvOLHf+c739GiRYuUm5urmpoaff7zn9fw8HCSpgUwmneaIlsgs0vzVFrgNTwNsoXL6ZDDivxnQhAAmWBFrBfkDCEIAFwOIQgyysqaEuW4Heoc8OtIa7/pcQAACfDUU09p06ZNeuSRR7R7927V1dVp/fr1amtrG/Xjf/zjH+tLX/qSHnnkETU0NOgHP/iBnnrqKf3t3/5tkicHcKHdjd2SOIWF5IudxPIRggDIALXRXpB9bIIAwGURgiCjeFwOvWf2VEnSTk5iAUBGeuyxx3Tfffdpw4YNWrp0qbZu3aq8vDw98cQTo378a6+9prVr1+ruu+/W7Nmzdfvtt+tTn/rUVbdHACTWO9E+kFWzCEGQXLGTWHSCAMgEK6pLJEVCEC5iAMDoCEGQcerjvSCUowNApvH7/dq1a5fWrVsXf5/D4dC6deu0c+fOUR9z0003adeuXfHQ48SJE3r++ed1xx13JGVmAJcKh23tiW+ClBidBdnH43JK4hwWgMywsLJAbqel7sGAzpynHB0ARuMyPQAw2SK9IIf1+olOhcK2nLGjvwCAtNfR0aFQKKSKioqL3l9RUaFDhw6N+pi7775bHR0duvnmm2XbtoLBoD73uc9d8RyWz+eTz+eL/3Nvb+/kfAIAJElH2/rV5wsqz+PUomihK5As3ug5LEIQAJnA63JqcWWR9jX3aF9zj2qm5pkeCQBSDpsgyDi104tU6HWpdziog2f5phUAZLsdO3bo0Ucf1fe+9z3t3r1bP/vZz/Tcc8/p61//+mUfs2XLFhUXF8ffampqkjgxkPl2R09h1c0okcvJUxIkV6wThHNYADIFvSAAcGU840DGcTkdWjM30gvyGr0gAJBRysrK5HQ61draetH7W1tbVVlZOepjHnroId1zzz367Gc/q+XLl+uTn/ykHn30UW3ZskXh8OjfANu8ebN6enrib01NTZP+uQDZbPfpSAhy3awSs4MgK8U7QdgEAZAhVsyIhiBnCEEAYDSEIMhIN86lFwQAMpHH49Hq1au1ffv2+PvC4bC2b9+u+vr6UR8zODgoh+PiL3mczsg9+MuVR3q9XhUVFV30BmDyxDZBrptJKTqSz8M5LAAZZvkFmyCUowPApegEQUaK9IJIb53qUiAUlpszCwCQMTZt2qQ///M/1/XXX68bbrhB3/nOdzQwMKANGzZIku69915VV1dry5YtkqQ777xTjz32mFatWqU1a9bo2LFjeuihh3TnnXfGwxAAydMzGNDx9gFJ0ipCEBgQ6wTxEYIAyBALKwrlcTrUMxRQU9eQZpbSCwIAFyIEQUZaXFmoKXlunR8M6N0z3Vo9a6rpkQAAk+Suu+5Se3u7Hn74YbW0tGjlypV64YUX4mXpjY2NF21+fOUrX5FlWfrKV76i5uZmTZs2TXfeeae++c1vmvoUgKy290y3JGlWaZ6m5nvMDoOsRCcIgEzjcTm0uKpQ756JlKMTggDAxQhBkJEcDkv180r1/L4WvXaskxAEADLMxo0btXHjxlH/3Y4dOy76Z5fLpUceeUSPPPJIEiYDcDV7mrolSStrSozOgezFOSwAmWh5dbHePdOjd5u79ZEVVabHAYCUwo0gZKz6aC/IzhP0ggAAAKQKQhCYFitG9wVDhicBgMkT7wWhHB0ALkEIgoxVH+0Fefv0eQ0HeIIDAABgmm3beidaik4IAlPYBAGQiWqjIch+ytEB4BKEIMhY86blq7zQK38wrN3RJ9sAAAAwp7FrUOcHA/I4HVo6vcj0OMhSh
CAAMlGsHL13OKimriHT4wBASiEEQcayLEs3zYuexDrOSSwAAADTYqewlkwvktflNDsMspaXEARABoqVo0vSvmZOYgHAhQhBkNHqCUEAAABSxjuN3ZKkVZzCgkGxThB/iBAEQGZZNj3aC0IIAgAXIQRBRrsp2guyp6lbA76g4WkAAACyG6XoSAWcwwKQqZZf0AsCABhBCIKMVjM1TzOm5CoYtvXWqS7T4wAAAGQtXzCkg2d7JRGCwKxYCOIjBAGQYeIhyFnK0QHgQoQgyHjxXpATnMQCAAAwpeFcn/yhsKbkuTWrNM/0OMhiHmekj4ZzWAAyzcLKArmdlroHAzpznnJ0AIghBEHGoxcEAADAvD2N5yVFtkAsyzI8DbIZ57AAZCqvy6lFlZFydE5iAcAIQhBkvPq5kV6Q/c096hkKGJ4GAAAgO430gUwxOwiyHiEIgExWSzk6AFyCEAQZr7I4R3On5StsS2+epBcEAADAhHgIMrPE6BwAIQiATFYb7wXpNTwJAKQOQhBkhVgvyGvHOwxPAgAAkH3OD/h1qnNQkrRyRonZYZD1vLEQhE4QABkoXo7eTDk6AMQQgiArxE5i0QsCAACQfLEtkLll+SrOc5sdBlkvFoL4giHDkwDA5FtUWSiXw1LXgF9ne4ZNjwMAKYEQBFnhxrlTJUmHWvrU0e8zPA0AAEB2eSfeB1JidA5AkjxOzmEByFw5bqcWVETK0fedoRcEACRCEGSJ0gKvFldGvgh4/QTbIAAAAMlEHwhSCZ0gADLd8uoiSZGTWAAAQhBkkZvmRU5ivcZJLAAAgKSxbVt72QRBCvHEz2ERggDITPFekLOEIAAgEYIgi6ydHy1HP0Y5OgAAQLKc6hxUz1BAHpdDiyuLTI8DjJzDohgdQIaqpRwdAC5CCIKsccOcqXI6LJ3qHFRz95DpcQAAALJCbAukdnpR/BX4gEmcwwKQ6ZZUFcnpsNTR71dLL+XoAMCzEGSNwhy3VsyIvBqCbRAAAIDkiPWBrJhRYnQOIIYQBECmy3E7taC8QBLl6AAgEYIgy9w0L3ISaye9IAAAAEnx7pluSVJdTbHZQYAor4tzWAAyX/wk1tlew5MAgHmEIMgqa6Pl6K8e7+AuJgAAQIIFQmEdiH7zpY5NEKQIj9MpiU0QAJlt+QW9IACQ7QhBkFWumzVFHpdDrb0+negYMD0OAABARjvc0idfMKzCHJdml+abHgeQxDksANmhtrpIkrSPEAQACEGQXXLcTq2eOUUSvSAAAACJ9m70DnndjBI5HJbhaYCIWAgSDNsKh9kOB5CZllYVy2FJ7X0+tVKODiDLEYIg66ydH+kFeY1eEAAAgITaGy9Fpw8EqSMWgkj0ggDIXLkepxaUF0qiHB0ACEGQdeqjvSA7T3Tyyi8AAIAE2hstRV9BHwhSiPeCEMTHSSwAGSxWjv4uJ7EAZDlCEGSduhnFKvC61D0Y0MFzvabHAQAAyEiD/qCOtvVLklbWlJgdBriAy2HJil5n8wVDZocBgARaHu0FoRwdQLYjBEHWcTkdumHOVEnSTk5iAQAAJMSBs70KhW2VF3pVWZxjehwgzrIseZyUowPIfMujm5j7mntk21zCAJC9CEGQlW6aF+kFefU45egAAACJEOsDqWMLBCko1gtCCAIgky2tKrqgHN1nehwAMIYQBFnppmgvyJsnuxSgDBEAAGDS7Y2WsNZRio4UFOsFoRgdQCa7qBydk1gAshghCLLS4spCTc33aNAfir9KEQAAAJPnXUrRkcI4hwUgWyyPvhiBEARANiMEQVZyOCzVz42exDpGLwgAAMBkOj/g1+nOQUnSCjZBkII4hwUgWyyvjoYg0RcnAEA2IgRB1lo7P3IS69Vj9IIAAABMpnejrzadXZqnkjyP4WmASxGCAMgWtbEQpLmXcnQAWYsQBFlr7fzIJsg7Tec14AsangYAACBzvEspOlJcLATx0QkCIMMtrSqS02Gpo59ydADZixAEWWvm1DzNmJKrQMjWm6e6TI8DAACQMfbSB4IURycIrtXjjz+u2bNnKycnR2vWrNGbb755xY//zne+o0WLFik3N1c1NTX6/Oc/r+Hh4SRNi2wWKUcvkDTS1wUA2YYQBFnLsiytnRc5ifUaJ7EAAAAmhW3b2tMUOYdVRx8IUhTnsHAtnnrqKW3atEmPPPKIdu/erbq6Oq1fv15tbW2jfvyPf/xjfelLX9IjjzyihoYG/eAHP9BTTz2lv/3bv03y5MhWsZNY+ylHB5ClCEGQ1dYuiIQgr1CODgAAMCnO9Qyro98np8PSsumEIEhNHpdTEiEIJuaxxx7Tfffdpw0bNmjp0qXaunWr8vLy9MQTT4z68a+99prWrl2ru+++W7Nnz9btt9+uT33qU1fdHgEmS7wcnRAEQJYiBEFWu2lepBek4VyvOvu5jQkAAHCtYqc2FlYUKtfjNDsMcBnxc1h0gmCc/H6/du3apXXr1sXf53A4tG7dOu3cuXPUx9x0003atWtXPPQ4ceKEnn/+ed1xxx1JmRlYPmMkBKEcHUA2mlAIwu1LZIqyAq8WVxZKkl47zjYIAADAtXr3DKewkPq87mgxeiBkeBKkm46ODoVCIVVUVFz0/oqKCrW0tIz6mLvvvltf+9rXdPPNN8vtdmvevHm69dZbr3gOy+fzqbe396I3YKJGytH9aunl+3EAss+4QxBuXyLTrJ0f7QU5Ti8IAADAtYqd2ojdHwdSkZdNECTRjh079Oijj+p73/uedu/erZ/97Gd67rnn9PWvf/2yj9myZYuKi4vjbzU1NUmcGJkmxz1Sjr7vDCexAGSfcYcg3L5Eprl5fqwXhBAEAADgWti2HQ9BVrAJghRGMTomqqysTE6nU62trRe9v7W1VZWVlaM+5qGHHtI999yjz372s1q+fLk++clP6tFHH9WWLVsUDo/+38HNmzerp6cn/tbU1DTpnwuyC70gALLZuEKQZN2+ZO0TyXTDnKlyOSw1dQ2psXPQ9DgAAABp68z5IXUPBuR2WloUPTkKpCJCEEyUx+PR6tWrtX379vj7wuGwtm/frvr6+lEfMzg4KIfj4m+/OJ2RzqTL9TN4vV4VFRVd9AZciwt7QQAg27jG88FXun156NChUR9z9913q6OjQzfffLNs21YwGNTnPve5K57D2rJli7761a+OZzRgwvK9Lq2aWaK3Tp3Xq8c7NLN0pumRAAAA0lLsGyuLKgvldVGKjtQVK0b3cQ4LE7Bp0yb9+Z//ua6//nrdcMMN+s53vqOBgQFt2LBBknTvvfequrpaW7ZskSTdeeedeuyxx7Rq1SqtWbNGx44d00MPPaQ777wzHoYAiRY7U7k/Wo5uWZbhiQAgecYVgkzEhbcvY3/ZP/jgg/r617+uhx56aNTHbN68WZs2bYr/c29vL/cvkVBr55dFQpBjHfrUDYQgAAAAExErRV9eXWJ2EOAq2ATBtbjrrrvU3t6uhx9+WC0tLVq5cqVeeOGF+AtGGxsbL9r8+MpXviLLsvSVr3xFzc3NmjZtmu68805985vfNPUpIAtdWI5+rmdY00ty
TY8EAEkzrhDkWm9fStLy5cs1MDCgv/iLv9CXv/zlS1ZCpcjap9frHc9owDVZO79M3/ntUb12vFPhsC2Hg1dEAAAAjNe+5m5J9IEg9RGC4Fpt3LhRGzduHPXf7dix46J/drlceuSRR/TII48kYTJgdDlupxZWFKrhXK/ePdNDCAIgq4yrEyRZty+BZFtZU6J8j1NdA34daukzPQ4AAEDasW1b++KbIIQgSG2EIACy0Yp4OXq32UEAIMnGFYJIkduX27Zt05NPPqmGhgbdf//9l9y+3Lx5c/zj77zzTv3jP/6jfvKTn+jkyZP6zW9+w+1LpBy306Eb5kyVJL1yrN3wNAAAAOmnsWtQvcNBeZwOLaygFB2pLdYJ4qcTBEAWiZWjx85XAkC2GHcnCLcvkanWzi/Ti4fb9cqxTv3Fe+eZHgcAACCtxL6hsqSqMP4qeyBVedkEAZCF6maUSIr8nU05OoBsMqFidG5fIhPdsmCapAa9ebJTw4GQctxsKgEAAIzVvuZICFLLKSykAc5hAchGiyoL5XE61DMUUGPXoGaV5pseCQCSgpdoAVELKwpUXujVcCCs3afPmx4HAAAgrcT6QChFRzqIhyCcwwKQRTwuh5ZURU5WchILQDYhBAGiLMvSzfPLJEkvH+swPA0AAED6CIdt7W+OlaKXmB0GGANPtJ/SxyYIgCyzIn4Sq9voHACQTIQgwAVuXhAJQV45SggCAAAwVqc6B9TnC8rjcmhBRYHpcYCrinWCEIIAyDaUowPIRoQgwAVimyD7z/bo/IDf8DQAAADpIdYHsrSqSG4nTzGQ+ugEAZCtYuXo+5t7FArbZocBgCThGQpwgfKiHC2qKJRtS68d7zQ9DgAAQFqgDwTpZiQECRmeBACSa960fOW6nRrwh3Syo9/0OACQFIQgwB+In8Q61m54EgAAgPTwbnQTpLaaEATpgWJ0ANnK5XSotrpIkrS3iZNYALIDIQjwB2IhyMtHO2TbrIYCAABcSThs60AzmyBILx4n57AAZK/l1SWSRs5ZAkCmIwQB/sCaOVPldlo6c35IpzsHTY8DAACQ0k50DGjAH1KO26H50yhFR3rw0gkCIIvV1URetLD3TLfZQQAgSQhBgD+Q53HpuplTJEkvH+swPA0AAEBq29fcLSlSiu6iFB1pgmJ0ANlsefR85cGzvQpwFhBAFuBZCjCKW2K9IEfpBQEAALiSfWd6JY18QwVIB3SCAMhms0vzVZjjki8Y1pHWPtPjAEDCEYIAo7h5wTRJ0mvHOxXkiREAAMBl7T8buSe+fEaJ2UGAcYh1ggRCtsJhegABZBeHw4q/eGHfGXpBAGQ+QhBgFMuri1Wc61bfcFDvUhQGAAAwqnDY1sGzkU2Q2uoiw9MAYxfbBJHYBgGQnVZEX7ywlxAEQBYgBAFG4XRYumleqSTplaP0ggAAAIzmVOeA+n1BeV2UoiO9EIIAyHYrZkQ3QaLdXgCQyQhBgMu4Od4LQggCAAAwmv3RLZDFlKIjzXgu+O8r5egAslEsBDl0rk/DgZDhaQAgsXimAlzGe6O9ILsbz6tvOGB4GgAAgNRzIHo2dDmnsJBmLMuKByE+QhAAWai6JFdT8z0Khm0daqEcHUBmIwQBLqNmap7mlOUrGLa183in6XEAAABSTqwUvXZ6seFJgPHzRk9isQkCIBtZlhXfBtnb1G12GABIMEIQ4AreGz2J9dLRdsOTAAAApBbbtrW/OVaKTgiC9OMhBAGQ5eri5ejdRucAgEQjBAGu4JboSayX6QUBAAC4yJnzQ+oZCsjttLSwotD0OMC4EYIAyHYra0oksQkCIPMRggBXcOO8Urkclk53Dup054DpcQAAAFLG/mgfyKLKwvg3k4F0Eg9BQhQCA8hOsXNYx9sH1EsXKoAMxrMV4AoKvC6tnjVFkvQS2yAAAABx9IEg3VGMDiDblRZ4VTM1V5K070yP4WkAIHEIQYCreO/CyEmsl47QCwIAABAT6wNZRh8I0hTnsABgpBdkDyexAGQwQhDgKt4b7QXZebxTgRBPkAAAACKl6JFXjC4nBEGaIgQBAHpBAGQHQhDgKpZNL9LUfI/6fUG909htehwAAADjWnqH1Tngl9NhaXElpehIT7FzWH5e6AQgi9XFQpAz3UbnAIBEIgQBrsLhsHTz/DJJ0stHOYkFAAAQO4W1oLxAOW6n4WmAiWETBAAi3V5Oh6XWXp9aeoZNjwMACUEIAozBLQsiIQi9IAAAAIqfwlpGKTrSmJcQBACU63FqUUVkq3NP03nD0wBAYhCCAGMQK0d/t7lHXQN+w9MAAACYdeBsrA+kyPAkwMTFN0E4hwUgy8VOYu1p6jE7CAAkCCEIMAYVRTlaVFEo25ZePdZhehwAAACjYuewailFRxqLd4KwCQIgy62sifx9Tjk6gExFCAKM0XsXchILAACgvc+nlt5hWZa0pIpNEKSv2CaIjxAEQJaLbYLsa+5RKGybHQYAEoAQBBijWxZETmK9dLRdts0XBQAAIDvtj57CmluWr3yvy/A0wMR5XU5JhCAAsKC8UHkep/p9QZ1o7zc9DgBMOkIQYIxumDNVOW6HWnt9OtzaZ3ocAAAAIw40x/pAOIWF9OahGB0AJElOhxU/cbmHk1gAMhAhCDBGOW6n6ueWSpJ+f5iTWAAAIDvRB4JMQQgCACNWRk9i7T3TbXQOAEgEQhBgHN63MHIS6/f0ggAAgCwVO4e1dDp9IEhv8WL0UMjwJABgXt2MEknS3qYes4MAQAIQggDj8L5F5ZKkt051acAXNDwNAABAcvUMBnTm/JAkadl0NkGQ3tgEAYARdTWRv9cbzvVqOEA4DCCzEIIA4zC7NE8zp+YpELL12vFO0+MAAAAk1YHoFkjN1FwV57oNTwNcGy8hCADEVZfkqqzAo2DY1sFzvabHAYBJRQgCjINlWRecxGozPA0AAEByHTgb7QNhCwQZIL4JEiIEAQDLskZ6QShHB5BhCEGAcbp1USQE2XG4XbZtG54GAAAgeWKbIMvoA0EGiHeCsAkCAJJGekHeaew2OgcATDZCEGCcbpxbKo/ToTPnh3SyY8D0OAAAAEmzP7oJsqyaTRCkv9gmiI8QBAAkSatmTpEk7WETBECGIQQBxinf69J75kS+MNhxuN3wNAAAAMkx5A/pRHu/JDZBkBkoRgeAi62oKZZlSY1dg+ro95keBwAmDSEIMAEjvSCEIAAAIDs0tPQqbEvTCr0qL8wxPQ5wzeLnsOgEAQBJUlGOWwvKCyRJeziJBSCDEIIAE/C+heWSpNdPdGo4EDI8DQAAQOIdaI70gdSyBYIMET+HFSAEAYCYWDn6O03nzQ4CAJOIEASYgIUVBaoqzpEvGNYbJ7tMjwMAAJBwB2J9INPpA0FmiJ/DYhMEAOJivSCUowPIJIQgwARYljVyEoteEAAAkAX2n41sgtAHgkzhdTkl0QkCABdaNbNEkrS3qVuhsG12GACYJIQgwATFQpAdR9oMTwIAAJBYgVBYR1oipei11WyCIDN4KUYHgEssKC9UvsepAX9IR9v6TI8DAJOCEASYoJv
ml8npsHSifUCNnYOmxwEAAEiYo6398ofCKspxacaUXNPjAJOCc1gAcCmnw1JdrBeEk1gAMgQhCDBBxblurZ4VuZXJNggAAMhksVNYS6cXybIsw9MAk8PjZBMEAEYTO4n1TiPl6AAyAyEIcA1uW1QuSXrxECEIAADIXAejpei1lKIjg3g4hwUAo1pVQzk6gMxCCAJcg9sWR3pBXjveqeFAyPA0AAAAiXEgVopeTSk6MseF57Bsm/JfAIhZGd0EOdrWr56hgNlhAGASEIIA12BRRaGqinPkC4a180Sn6XEAAAAmXThsswmCjBQLQSR6QQDgQmUFXs2cmidJevdMt9lhAGASEIIA18CyLN0aPYm1g5NYAAAgA53qHNCAP6Qct0NzpxWYHgeYNLFOEImTWADwh0Z6QbqNzgEAk4EQBLhGty2KnMR68XA7a/QAACDjHIhugSyuLJLTQSk6MgchCABc3qqaEkmUowPIDIQgwDVaO79MHqdDjV2DOtExYHocAACASbU/2gdSSx8IMozDYcntjAR7nMMCgIutmhktR2/q5gWfANIeIQhwjfK9Lq2ZO1WS9CInsQAAQIaJ9YEsow8EGSi2DeILEIIAwIWWVBXJ43KoezCgU52DpscBgGtCCAJMgngvyOF2w5MAAABMHtu24+ewlk1nEwSZJ1aOziYIAFzM43KoNvp3PyexAKQ7QhBgEsR6Qd442akBX9DwNAAAAJPjXM+wugb8cjosLawoND0OMOniIQidIABwifhJLMrRAaQ5QhBgEswpy9es0jwFQrZePdZhehwAyHiPP/64Zs+erZycHK1Zs0ZvvvnmFT++u7tbDzzwgKqqquT1erVw4UI9//zzSZoWSF+xLZAF5QXKcTsNTwNMPq8r8t9rHyEIAFxi1cwSSdJuNkEApDlCEGASWJal26InsV7kJBYAJNRTTz2lTZs26ZFHHtHu3btVV1en9evXq61t9F4mv9+vD37wgzp16pSefvppHT58WNu2bVN1dXWSJwfSz4FoKfpSTmEhQ7EJAgCXt3pWZBPkUEsfVy8ApDVCEGCS3Bo9ibXjcJts2zY8DQBkrscee0z33XefNmzYoKVLl2rr1q3Ky8vTE088MerHP/HEE+rq6tIzzzyjtWvXavbs2Xrf+96nurq6JE8OpJ8DlKIjw8WK0ekEAYBLVRXnanpxjkJhW3vPdJseBwAmjBAEmCQ3zi1Vjtuhcz3DOtTSZ3ocAMhIfr9fu3bt0rp16+LvczgcWrdunXbu3DnqY5599lnV19frgQceUEVFhWpra/Xoo48qFAola2wgbR2kFB0Zjk0QALiy66LbILtPcxILQPoiBAEmSY7bqbXzyiRJvzs0+kkWAMC16ejoUCgUUkVFxUXvr6ioUEtLy6iPOXHihJ5++mmFQiE9//zzeuihh/Ttb39b3/jGNy776/h8PvX29l70BmSb8wN+NXcPSeIcFjIXIQgAXFnsJNYuQhAAaYwQBJhE718S6QUhBAGA1BEOh1VeXq7vf//7Wr16te666y59+ctf1tatWy/7mC1btqi4uDj+VlNTk8SJgdRw8Fwk/Js5NU9FOW7D0wCJ4Y2FIGwHAsCoYiHI7sZuhcOc/gaQnghBgEn0/sWREGR343l1DfgNTwMAmaesrExOp1Otra0Xvb+1tVWVlZWjPqaqqkoLFy6U0+mMv2/JkiVqaWmR3z/6n9WbN29WT09P/K2pqWnyPgkgTcRK0TmFhUzmjnaCBIJ8Yw8ARrOkqkg5bod6hgI60dFvehwAmBBCEGASVRXnamlVkWw7UpAOAJhcHo9Hq1ev1vbt2+PvC4fD2r59u+rr60d9zNq1a3Xs2DGFwyOnTo4cOaKqqip5PJ5RH+P1elVUVHTRG5BtDtAHgizgcliSpECYc1gAMBq306G6GSWSOIkFIH0RggCT7APRk1jbOYkFAAmxadMmbdu2TU8++aQaGhp0//33a2BgQBs2bJAk3Xvvvdq8eXP84++//351dXXpwQcf1JEjR/Tcc8/p0Ucf1QMPPGDqUwDSwkgIUmx4EiBxXM5ICBLixAsAXBa9IADSncv0AECmuW1xub77u2N66XC7AqFwfMUeADA57rrrLrW3t+vhhx9WS0uLVq5cqRdeeCFelt7Y2CiHY+TP3pqaGv3qV7/S5z//ea1YsULV1dV68MEH9cUvftHUpwCkvCF/SCfaIycv2ARBJnNF/74IhAhBAOByCEEApDtCEGCS1c0oUWm+R50Dfr11qks3zSszPRIAZJyNGzdq48aNo/67HTt2XPK++vp6vf766wmeCsgcDS29CttSWYFX5UU5pscBEmZkE4RzWABwOatmRkKQ4+0DOj/g15T80U/KAkCq4iXqwCRzOizduihyEut3DZzEAgAA6Yc+EGSLeCcImyAAcFlT8z2aOy1fkvROE9sgANIPIQiQALFekN/RCwIAANLQwbM9kghBkPlc0dO1QUIQALii1TM5iQUgfRGCAAlwy4IyuRyWTnQMxO9pAwAApAtK0ZEtYpsgnMMCgCujFwRAOiMEARKgMMetNXOnSmIbBAAApJdAKKxDLX2S2ARB5osXo4fZBAGAK4mFIHubehQIERwDSC+EIECCvH9xhSRCEAAAkF6Ot/fLHwyrwOvSzKl5pscBEsodL0YnBAGAK5k3rUBFOS4NBUI6dK7P9DgAMC6EIECCfGBxpBfkzZNd6h0OGJ4GAABgbA40R05hLZ1eJEf0VBCQqZzxYnRe1QwAV+JwWLoufhKry/A0ADA+hCBAgswuy9fcafkKhm29fKTD9DgAAABjsp9SdGQRitEBYOzi5eiN3WYHAYBxIgQBEii2DbK9odXwJAAAAGNDKTqySawYPcg5LAC4qng5+ik2QQCkF0IQIIHWLYn2ghxuU5AVewAAkOLCYVsN8RCETRBkPle0E4Sv1QHg6upqSuR0WDrbM6zm7iHT4wDAmBGCAAm0etYUleS51T0Y0K7T502PAwAAcEVN5wfV5wvK43RofnmB6XGAhHM7Ik+JKUYHgKvL97riL5J4m20QAGmEEARIIJfTofcvip7EOtRmeBoAAIAri53CWlRZKLeTpwrIfPFidEIQABiT62dNlSS9fYoXegJIHzyzARLsA9GTWL89SC8IAABIbQcoRUeWcXMOCwDG5T2zI70gb7EJAiCNEIIACfbehWVyOy2d6BjQ8fZ+0+MAAABc1gH6QJBlnNFzWBSjA8DYrI6GIIdb+9QzFDA8DQCMDSEIkGCFOW7dOLdUkrS9gW0QAACQumIhyNLpxYYnAZKDYnQAGJ/ywhzNLs2TbUu76T4FkCYIQYAk+ODS2EksekEAAEBqausdVnufT5YlLakqND0OkBTxc1hsggDAmL1ndqQXhJNYANIFIQiQBLFekLdPd+n8gN/wNAAAAJeKbYHMm1agPI/L8DRAcsTPYYUIQQBgrGIhCOXoANIFIQiQBNUluVpSVaSwLb14mG0QAACQeihFRzZyO2KbIJzDAoCxuj7aC7LnTLd8wZDhaQDg6ghBgCT54JJySdJv6QUBAAApiFJ0ZCOng3
NYADBec8ryVZrvkT8Y1r4zPabHAYCrIgQBkmRdtBfk94fbeaUEAABIOSMhCKXoyB5uJ+ewAGC8LMuKb4O8xUksAGmAEARIktrpxaoo8mrAH9IbJygPAwAAqaNnKKDGrkFJbIIgu7goRgeACRnpBeH7GwBSHyEIkCQOh6X3L45sg/zmICexAABA6jgY3QKpLslVSZ7H8DRA8sTPYYXoBAGA8YiHIKfPK0yQDCDFEYIASXR79CTWbxtaZdt8kQAAAFIDpejIVvFzWHwDDwDGZen0IuW6neoZCuhoW7/pcQDgighBgCSqn1eqPI9T53qGtb+51/Q4AAAAkkY2QegDQbYZKUZnEwQAxsPtdGjVzBJJ0lucxAKQ4ghBgCTKcTt166JpkqRfH2wxPA0AAEDESCk6myDILm4HxegAMFH0ggBIF4QgQJJ9cCm9IAAAIHUMB0I61h45Y7GsmhAE2YVidACYuFgI8tap84YnAYArIwQBkuy2ReVyOiwdaulTY+eg6XEAAECWO9TSp1DY1tR8jyqLckyPAySVi2J0AJiwVTNL5HRYau4eUnP3kOlxAOCyCEGAJCvJ82jNnMirJTiJBQAATLuwFN2yLMPTAMnlcnIOCwAmKt/rip/SfPNkp+FpAODyCEEAA2InsX7NSSwAAGDYAUrRkcXimyCcwwKACYm9yPPNk/SCAEhdhCCAAbEQ5O1TXeoa8BueBgAAZDNK0ZHNRjpBOIcFABOxZk6pJOmNE4QgAFIXIQhgwIwpeVpaVaSwLW1vYBsEAACYEQyFdegcIQiyl8sRPYfFJggATMh7Zk+VZUknOgbU1jdsehwAGBUhCGBIbBvkN5zEAgAAhpzoGJAvGFa+x6nZpfmmxwGSLnYOy7alEEEIAIxbcZ5biytjvSBsgwBITYQggCG3L4uEIC8dbdeQP2R4GgAAkI32N0dK0ZdUFcnhoBQd2Sd2DkuSAiFOYgHARMR6QTiJBSBVEYIAhiytKlJ1Sa6GA2G9cqzD9DgAACALxfpAaqspRUd2ip3DktgEAYCJohwdQKojBAEMsSwrfhLr1wdaDE8DAACyUWwThD4QZKsLN0GCIUIQAJiIG6IhyOHWPnUN+A1PAwCXIgQBDLo9GoJsP9SmIOv3AAAgicJhWwfPxkrR2QRBdnJdcAYuGObrcQCYiNICrxaUF0iS3jrFNgiA1EMIAhh0w5ypKslzq2vAr7dPnzc9DgAAyCJN5wfV5wvK43RoQUWB6XEAIyzLkjMahAQ5hwUAE3YDvSAAUhghCGCQy+nQBxZHtkFe2M9JLAAAkDyxPpBFlYVyO3lagOwV2wahGB0AJm7N3FJJ0hsnOw1PAgCX4tkOYNj6ZZEQ5DcHW2XbvPoMAAAkx4GzkT6Q2mr6QJDdYiEIxegYj8cff1yzZ89WTk6O1qxZozfffPOKH9/d3a0HHnhAVVVV8nq9WrhwoZ5//vkkTQskXqwc/eC5XvUOBwxPAwAXIwQBDHvvwmnKdTvV3D2k/c29pscBAABZIvZ1x1L6QJDlXNFNqADF6Bijp556Sps2bdIjjzyi3bt3q66uTuvXr1dbW9uoH+/3+/XBD35Qp06d0tNPP63Dhw9r27Ztqq6uTvLkQOJUFOVodmmebFt6m14QACmGEAQwLMft1K2LpkmSfnWAk1gAACDxbNse2QSZziYIspvbySYIxuexxx7Tfffdpw0bNmjp0qXaunWr8vLy9MQTT4z68U888YS6urr0zDPPaO3atZo9e7be9773qa6uLsmTA4m1Zk7sJBYhCIDUQggCpID1yyolEYIAAIDkaOvzqaPfL4clLa4kBEF2c9IJgnHw+/3atWuX1q1bF3+fw+HQunXrtHPnzlEf8+yzz6q+vl4PPPCAKioqVFtbq0cffVShUOiyv47P51Nvb+9Fb0CqWzOXcnQAqYkQBEgBty0ul8th6Whbv06095seBwAAZLjYFsj88gLlepyGpwHMcjkiT4uDbIJgDDo6OhQKhVRRUXHR+ysqKtTSMvqL2k6cOKGnn35aoVBIzz//vB566CF9+9vf1je+8Y3L/jpbtmxRcXFx/K2mpmZSPw8gEW6I9oLsb+7RgC9oeBoAGEEIAqSA4ly36udF1kZ/daDV8DQAACDTxfpAltEHAsgVP4fFJggSIxwOq7y8XN///ve1evVq3XXXXfryl7+srVu3XvYxmzdvVk9PT/ytqakpiRMDEzNjSp6qS3IVDNvadfq86XEAII4QBEgRsZNYL3ASCwAAJFhsE2QZfSCAXPFzWGyC4OrKysrkdDrV2nrxi9daW1tVWVk56mOqqqq0cOFCOZ0jm3dLlixRS0uL/H7/qI/xer0qKiq66A1IBzfOjbzA8/UTnYYnAYAREwpBHn/8cc2ePVs5OTlas2aN3nzzzSt+fHd3tx544AFVVVXJ6/Vq4cKFev755yc0MJCpbl9aIcuS9jZ1q6Vn2PQ4AAAgg7EJAoxwOyNPiylGx1h4PB6tXr1a27dvj78vHA5r+/btqq+vH/Uxa9eu1bFjxxS+YNvoyJEjqqqqksfjSfjMQDLFrlzsJAQBkELGHYI89dRT2rRpkx555BHt3r1bdXV1Wr9+vdra2kb9eL/frw9+8IM6deqUnn76aR0+fFjbtm1TdXX1NQ8PZJLyohytqimRJP36INsgAAAgMboH/WruHpIkLWUTBKAYHeO2adMmbdu2TU8++aQaGhp0//33a2BgQBs2bJAk3Xvvvdq8eXP84++//351dXXpwQcf1JEjR/Tcc8/p0Ucf1QMPPGDqUwASJhaCvHumR/30ggBIEa7xPuCxxx7TfffdF//LfevWrXruuef0xBNP6Etf+tIlH//EE0+oq6tLr732mtxutyRp9uzZ1zY1kKHWL6vU7sZu/epAi+6tn216HAAAkIEOnI1sgcycmqfiXLfhaQDzXNFNkCDnsDBGd911l9rb2/Xwww+rpaVFK1eu1AsvvBAvS29sbJTDMfKa05qaGv3qV7/S5z//ea1YsULV1dV68MEH9cUvftHUpwAkTHVJrmZOzVNj16DeOtWl2xaVmx4JAMYXgvj9fu3ateuiVzQ4HA6tW7dOO3fuHPUxzz77rOrr6/XAAw/oF7/4haZNm6a7775bX/ziFy+6h3khn88nn88X/+fe3t7xjAmkrfXLKrXl3w/p9RNdOj/g15R8VqMBAMDkivWB1FazBQJII50gQc5hYRw2btyojRs3jvrvduzYccn76uvr9frrryd4KiA11M8tVWPXoHYe7yQEAZASxnUOq6OjQ6FQKP7qhpiKigq1tIx+vufEiRN6+umnFQqF9Pzzz+uhhx7St7/9bX3jG9+47K+zZcsWFRcXx99qamrGMyaQtmaX5WtJVZFCYVu/aWi9+gMAAADGiT4Q4GIjIQjnsABgMsR7QY7TCwIgNUyoGH08wuGwysvL9f3vf1+rV6/WXXfdpS9/+cvaunXrZR+zefNm9fT0xN+ampoSPSaQMj5cWylJ+vd95wxPAgAAMlFsE2QZfSCAJIrRAWCyxUKQA2d71DMUMDwNAIwzBCkrK5PT6VRr6
8WvUG9tbVVlZeWoj6mqqtLChQsvOn21ZMkStbS0yO/3j/oYr9eroqKii96AbBELQV451qHeYb5YAAAAk2fQH9SJjgFJbIIAMSPF6IQgADAZKopyNLcsX2FbevNkl+lxAGB8IYjH49Hq1au1ffv2+PvC4bC2b9+u+vr6UR+zdu1aHTt2TOELVouPHDmiqqoqeTz0HQB/aEFFoeaXFygQsvW7hjbT4wAAgAzScK5Xti1VFHk1rdBrehwgJbid0XNYIc5hAcBkuZGTWABSyLjPYW3atEnbtm3Tk08+qYaGBt1///0aGBjQhg0bJEn33nvvRcXp999/v7q6uvTggw/qyJEjeu655/Too4/qgQcemLzPAsgwsW2Q5zmJBQAAJtGBs/SBAH/ISTE6AEy6+rnREOQEIQgA81zjfcBdd92l9vZ2Pfzww2ppadHKlSv1wgsvxMvSGxsb5XCMZCs1NTX61a9+pc9//vNasWKFqqur9eCDD+qLX/zi5H0WQIb5UG2lvvu7Y/r9kXYN+ILK9477f6oAAACX2N9MHwjwh1zRThA2QQBg8twYDUEazvXq/IBfU/K5BgPAnAl9Z3Xjxo3auHHjqP9ux44dl7yvvr5er7/++kR+KSArLa0q0qzSPJ3uHNSOw+36yIoq0yMBAIAMsL85sglSW80mCBDjZhMEACbdtEKvFpQX6Ghbv9442akP1fJ9DQDmjPscFoDEsyxLH4qdxNrPSSwAAHDtfMGQjrT2SWITBLiQM3rJgBAEACZXPb0gAFIEIQiQoj4cfZXEi4faNBwIGZ4GAACku6Ot/QqGbZXkuVVdkmt6HCBlUIwOAIlBLwiAVEEIAqSouhnFml6co0F/SC8daTc9DgAASHOxPpDa6cWyLMvwNEDqoBgdABJjTTQEOdLar45+n+FpAGQzQhAgRUVOYkW2Qf59f4vhaQAAQLrbfzZail7NKSzgQu54MTohCABMpqn5Hi2uLJQkvc42CACDCEGAFPbh5ZFekN82tMoX5CQWAACYuHgp+nRK0YELudgEAYCEuWlemSTpNXpBABhECAKksNUzp6i80Ku+4aBePdZhehwAAJCmgqGwGs5FQhBK0YGLOekEAYCEWTs/chKL72kAMIkQBEhhDoelO5ZHTmL9cu85w9MAAIB0dbx9QL5gWPkep2aX5pseB0gpbkf0HBabIAAw6dbMLZXTYel056CaugZNjwMgSxGCACnuIysiIchvDrZqOMBJLAAAMH4HYn0g04vlcFCKDlxopBidTRAAmGwFXpdW1pRIkl47zjYIADMIQYAUt3rmFFUW5ajPF9TLR/mCAQAAjF+sD4RSdOBS7vg5LDZBACAR1s6P9IK8eoxeEABmEIIAKe7Ck1jPvXvW8DQAACAd7Y9uglCKDlzK5Yw8LQ4QggBAQqydF+kFee14h2ybP2sBJB8hCJAGOIkFAAAmKhy2dfAsmyDA5bii57BCnMMCgIRYNXOKct1OdfT7dbi1z/Q4ALIQIQiQBq6bWaLqklwN+EPacbjd9DgAACCNnO4aVL8vKK/LofnTCkyPA6ScWAgSoBgdABLC43LohjlTJUmvcOYbgAGEIEAasCxLdyyvlCQ9t++c4WkAAEA62d8cOYW1uKoofvYHwAhn9H8XIc5hAUDC3BzvBSEEAZB8PAsC0sRHVkyXJG1vaNWQn5NYAABgbA5ET2HVTucUFjAad3QTJMg5LABImJvmR3pB3jjZpUCIP28BJBchCJAm6mYUa8aUXA36Q3rxcJvpcQAAQJo4ECtFr6YUHRgNxegAkHhLKos0Nd+jQX9Ie5q6TY8DIMsQggBpwrKseEH6c+9yEgsAAFydbdvxc1jL2AQBRjVSjE4IAgCJ4nBYqp8X2QbhJBaAZCMEAdLIR5dHT2IdatWgP2h4GgAAkOrO9gzr/GBALoelhRWFpscBUpLLGS1G5zwLACQUvSAATCEEAdJIbXWRZpXmaTgQ1m8bOIkFAACuLLYFsqCiUDlup+FpgNTEJggAJMfaeZEQ5J3Gbg34eGEngOQhBAHSiGVZujNakP7snmbD0wAAgFR3IBqCUIoOXJ7LEe0EIQQBgISaWZqnmqm5CoZtvXmyy/Q4ALIIIQiQZj6xKhKC7DjcrvMDfsPTAACAVHbgbK8k+kCAK4mdwwpyDgsAEi62DfIKJ7EAJBEhCJBm5pcXamlVkYJhW8/vpyAdAABc3r7oJsjyGcWGJwFSV2wThHNYAJB4a+kFAWAAIQiQhmLbIL9456zhSQAAQKpq6x1WW59PliUtqWITBLgcitEBIHnWzi+TZUmHWvrU1jtsehwAWYIQBEhDd9ZNl2VJb57q0pnzg6bHAQAAKSh2CmvetALleVyGpwFSF8XoAJA8U/M9Wl4d2VB9+SjbIACSgxAESENVxblaM2eqJOnf9nISCwAAXGo/pejAmLic0WL0ECEIACTDLQsiJ7FePtpueBIA2YIQBEhTH19ZLUn6xZ5mw5MAAIBUtP9sNASppg8EuJLYJkgwzDksAEiGWxZMkxQpRw+zhQcgCQhBgDR1R22V3E5Lh1r6dKil1/Q4AAAgxexvjnx9sGw6IQhwJbFOEM5hAUByXDdzivI8TnX0+9XA9zMAJAEhCJCmivPcunVRuSTpF3soSAcAACPOD/jV3D0kSVrKOSzgilwOzmEBQDJ5XA7Vzy2VRC8IgOQgBAHS2CeiJ7Ge3XOWFVIAABAXK0WfVZqn4ly34WmA1OZmEwQAko5eEADJRAgCpLEPLClXgdel5u4h7Wo8b3ocAACQIuJ9IJzCAq7KGe0ECYToBAGAZLllYaQX5K2T5zXkDxmeBkCmIwQB0liO26n1yyolST/bTUE6AACI2N8cCUGWVXMKC7gatzPytDjIJggAJM3csnxVl+TKHwrrjZOdpscBkOEIQYA098fXRU5i/fLdsxoO8OoJAAAwcg6LTRDg6mKbIKGwLdsmCAGAZLAs64KTWPSCAEgsQhAgzd04t1TVJbnqGw7q1wdbTY8DAAAM6xsO6GTHgCRpGaXowFW5HSNPi9kGAYDkuWVB5CQWvSAAEo0QBEhzDocV3wZ5etcZw9MAAADTDka3QKYX56i0wGt4GiD1uaLF6BLl6ACQTGvnl8qypCOt/WrpGTY9DoAMRggCZIA/um6GJOmVo+184QAAQJbbHw1BllVzCgsYi9g5LIlydABIppI8j1bMKJHENgiAxCIEATLA7LJ8vWf2FIVt6efvUJAOAEA2OxAtRV9OCAKMSawYXZKCITZBACCZbplPLwiAxCMEATLEn6yObIM8vauJQkcAALLY/rOREKS2mj4QYCwuWAShEwQAkixWjv7KsQ5OEgJIGEIQIEPcsbxKOW6HjrcPaO+ZHtPjAAAAA4b8IR1r65ck1U5nEwQYC8uy5I72ggTDnMMCgGS6btYUFXpd6hrwa38z38sAkBiEIECGKMxx68O1VZIi2yAAACD7NLT0KmxL0wq9Ki/KMT0OkDZcjshTY85hAUByuZ0OrY2exNpxmF4QAIlBCAJkkD+OFqQ/u+eshgMhw9MAAIBki/WB1E7nFBYw
Hi5HbBOEEAQAku19i6ZJkn5/pM3wJAAyFSEIkEHq55VqenGOeoeD2t7AFw8AAGSb/c29kqRaStGBcXHFzmGFOIcFAMl2azQE2dPUre5Bv+FpAGQiQhAggzgdlv4oug3y07c5iQUAQLaJlaIvYxMEGBdn7BwWmyAAkHRVxblaVFGosC29dLTD9DgAMhAhCJBh/vT6SAjy0tF2nTk/aHgaAACQLL5gSEda+ySxCQKMV7wYnU4QADAifhKLXhAACUAIAmSYWaX5Wju/VLYt/fQttkEAAMgWR1v7FQjZKslzq7ok1/Q4QFqJn8MKcw4LAEy4dWGsF6RdYbbyAEwyQhAgA33qhpmSpKfebuKuMQAAWWJfvBS9WJZlGZ4GSC8uzmEBgFGrZ09Rnsepjn6fDp7rNT0OgAxDCAJkoNuXVqo036PWXp9eZJUUAICssD8WgnAKCxg3lyMSHAZ4AREAGOF1OXXTvDJJkW0QAJhMhCBABvK4HPrj1ZFukJ+82Wh4GgAAkAz7z0ZeNVlbTSk6MF7OaAgSYhMEAIy5lV4QAAlCCAJkqP/rPTWSpBcPt+ls95DhaQAAQCIFQmE1RE9H1E5nEwQYL7czeg6LYnQAMOZ90V6QXY3n1TMUMDwNgExCCAJkqLnTCnTj3KkK29JP36YgHUBmefzxxzV79mzl5ORozZo1evPNN8f0uJ/85CeyLEuf+MQnEjsgkGTH2vrlD4ZV6HVp5tQ80+MAaWekGJ0QBABMqZmap3nT8hUK23r1WIfpcQBkEEIQIIPFCtJ/+lYTq/0AMsZTTz2lTZs26ZFHHtHu3btVV1en9evXq62t7YqPO3XqlL7whS/olltuSdKkQPLE+kCWTi+Sw0EpOjBesU6QIJ0gAGDUrYvKJXESC8DkIgQBMtj6ZZUqyXPrbM+wXqJYDECGeOyxx3Tfffdpw4YNWrp0qbZu3aq8vDw98cQTl31MKBTSpz/9aX31q1/V3LlzkzgtkBwHon0gyylFBybE5Yg8NQ7wwiEAMCp2EmvHkTbZNn8mA5gchCBABstxO/XH10UK0n/0BgXpANKf3+/Xrl27tG7duvj7HA6H1q1bp507d172cV/72tdUXl6uz3zmM8kYE0i62CZILSEIMCGxc1ihMJsgAGDSDXOmKtftVGuvTw3n+kyPAyBDEIIAGe5TN0QK0n93qFXNFKQDSHMdHR0KhUKqqKi46P0VFRVqaWkZ9TGvvPKKfvCDH2jbtm1j/nV8Pp96e3svegNSVShs62CsFL26yPA0QHqKncMKUIwOAEbluJ1aO79UUuT7GAAwGQhBgAw3v7xQN80rVdiWfvT6adPjAEBS9fX16Z577tG2bdtUVlY25sdt2bJFxcXF8beampoETglcm5Md/Rr0h5TncWpOWYHpcYC05HJGnhrTowcA5r1/ceQFT787dOXOPwAYK0IQIAvcWz9bkvSTt5o0HAiZHQYArkFZWZmcTqdaWy9+VVhra6sqKysv+fjjx4/r1KlTuvPOO+VyueRyufTP//zPevbZZ+VyuXT8+PFRf53Nmzerp6cn/tbU1JSQzweYDPubI1sgS6uK5KQUHZgQitEBIHXctjjSC/JOU7c6+32GpwGQCQhBgCywbkm5phfnqGvAr+fePWd6HACYMI/Ho9WrV2v79u3x94XDYW3fvl319fWXfPzixYu1b98+7dmzJ/72sY99TLfddpv27Nlz2Q0Pr9eroqKii96AVEUfCHDtYpsgnMMCAPOqinO1tKpIti39/ki76XEAZABCECALuJwOffrGWZKkf955yuwwAHCNNm3apG3btunJJ59UQ0OD7r//fg0MDGjDhg2SpHvvvVebN2+WJOXk5Ki2tvait5KSEhUWFqq2tlYej8fkpwJMiv1nIyHIsumEdcBExTZBOIcFAKnhA0vKJUnbOYkFYBIQggBZ4v96T408Tof2nunRnqZu0+MAwITddddd+ta3vqWHH35YK1eu1J49e/TCCy/Ey9IbGxt17hxbb8gO4bCtA82xUnQ2QYCJihejhzmHBQCp4LbFkRDkpcPtCnCqEMA1cpkeAEBylBZ49dEVVfrZO83659dOaeVdK02PBAATtnHjRm3cuHHUf7djx44rPvaHP/zh5A8EGNLYNag+X1Ael0PzyylFByYqXozOOSwASAl1M0pUmu9R54Bfb586r/p5paZHApDG2AQBssi9N82WJP3y3XPqoFwMAIC0FzuFtaSqSG4nX9oDEzWyCUIIAgCpwOmw9L5FkYL03x1qNTwNgHTHMyUgi6ysKVHdjGL5Q2E99VaT6XEAAMA12h87hUUfCHBNXM5ICBLk5AoApIwPLI6cu/0dvSAArhEhCJBl7q2fLUn6l9dP8yQPAIA0dyC6CUIfCHBtKEYHgNRzy8IyuRyWjrcP6FTHgOlxAKQxQhAgy3xkRZVK8z061zOsf9/fYnocAAAwQbZta39zNASZTggCXItYJ0iAThAASBlFOW69Z/ZUSWyDALg2hCBAlslxO/VnN86SJP3Tyydk2zzRAwAgHTV3D+n8YEBup6WFlZSiA9fCHd8EYVMaAFLJ+xeXS5JePEwIAmDiCEGALHRP/Sx5XA7tPdOjt0+fNz0OAACYgFgfyMKKQnldTsPTAOnN6YhugnAOCwBSyvuXREKQ1090qt8XNDwNgHRFCAJkobICr/5oVbWkyDYIAABIP/E+EE5hAdeMYnQASE1zy/I1uzRPgZCtl4+0mx4HQJoiBAGy1GduniNJ+vXBVgrGAABIQ/tifSDVRYYnAdJfrBg9yCYIAKQUy7K0bkmFJOk3B1sNTwMgXRGCAFlqQUWhbl00TbYt/e9XT5oeBwAAjMOFpejLqtkEAa5VrBg9SDE6AKSc25dVSpK2H2pTgI09ABNACAJksftumStJ+unbZ9QzGDA8DQAAGKu2Pp86+v1yOiwtrWITBLhWbmesGJ0QBABSzepZUzQ136OeoYDeOtVlehwAaYgQBMhiN80r1eLKQg0FQvrRm6dNjwMAAMZo35nIFsj8aQXKcVOKDlwrZ/QcFq8wBoDU43RYev/iSEH6rw9wEgvA+BGCAFnMsqz4NsiTr52SP8iTPgAA0sH+s7FTWGyBAJPB7Yiew2ITBABS0u1LR3pBbJs/qwGMDyEIkOXurJuu8kKvWnt9euadZtPjAACAMdjf3CtJqp1OHwgwGZwUowNASrtlwTTluB1q7h7SwXO9pscBkGYIQYAs53E54tsg39txTEFOAAAAkPJipejLZxCCAJPBFe0E4WthAEhNuR6nblkwTVJkGwQAxoMQBIDuXjNTU/LcOtU5qF++e870OAAA4Ara+3xq6R2WZUlLKEUHJoXbyTksAEh1H7zgJBYAjAchCADle136bHQb5H++eExhnvwBAJCyDkT7QOaU5avA6zI8DZAZ4uew2AQBgJT1gcXlcljSgbO9OnN+0PQ4ANIIIQgASdI99bNUlOPSsbZ+vXCgxfQ4AADgMuKnsKo5hQVMFreTThAASHWlBV5dP2uqJOm3bIMAGAdCEACSpKIct/7j2jmSpO/+7phsmyeAAAC
kIkrRgcnndETPYYX4GhgAUlnsJNavCUEAjAMhCIC4DTfNVr7HqYZzvdre0GZ6HAAAMIr90XNYy6rpAwEmizt2DivMOSwASGWxEOSNk13qGQwYngZAuiAEARA3Jd+jP6ufJUn67otsgwAAkGq6B/06c35IkrSMTRBg0rgoRgeAtDC7LF8LKwoUCtv63WG2QQCMDSEIgIvcd8tc5bgd2tvUrd8faTc9DgAAuEDsFNas0jwV57oNTwNkjpFidEIQAEh165dVSpKe30efKYCxIQQBcJGyAq8+vSayDfJ3/35IIV4NBwBAyoidwqIPBJhc8WL0EOewACDV3bG8SpL0+yPt6vcFDU8DIB0QggC4xH95/3wV57p1qKVP//p2k+lxAABA1P5m+kCARIhvgvACIABIeYsrCzWnLF/+YFjbGziJBeDqCEEAXKIkz6O/+sACSdK3f3OEV1YAAJAiYiEImyDA5HLTCQIAacOyLN2xPHIS6985iQVgDAhBAIzqnhtnaXZpntr7fPpfvz9uehwAALJe73BApzoHJUm11YQgwGRyRTdBApzDAoC0EDuJ9eLhNg3wwk0AV0EIAmBUHpdDm+9YIkn6/ksndLZ7yPBEAABkt4NnI6Xo04tzNDXfY3gaILO4HJGnxvThAUB6WFpVpFmlefIFw3rxcJvpcQCkOEIQAJd1+9IK3TBnqnzBsP7hV4dNjwMAQFaLn8JiCwSYdK54MTohCACkg8hJrMg2CCexAFwNIQiAy7IsSw99ZKkk6efvNGtvU7fZgQAAyGKxEGQ5IQgw6VzxYnTOYQFAurijNhKC/O5Qm4b8IcPTAEhlhCAArmj5jGL90XXVkqT/+q97ubUJAIAh+9gEARLGFS1GD9tSmJNYAJAWaquLVDM1V0OBkHZwEgvAFRCCALiqv71jicoLvTrW1q/NP9sn2+aJIQAAyTTgC+pEx4AkQhAgEWLnsCQpwDYIAKQFy7Li2yDP7TtneBoAqYwQBMBVlRV49T/vvk5Oh6Vn957Vv7zRaHokAACySsO5Xtm2VFHk1bRCr+lxgIwTO4clUY4OAOkk1gvyu0NtGg5wEgvA6AhBAIzJDXOm6ksfWixJ+vq/HaQfBACAJIqfwprOFgiQCC7HyFPjAOXoAJA2VswoVnVJrgb9Ie043G56HAApihAEwJh99pY5Wr+sQv5QWH/5o906P+A3PRIAAFlhf3OvJE5hAYnCJggApCfLsnTH8kpJ0i/fPWt4GgCpihAEwJhZlqV/+NM6zSrNU3P3kP7jD99SU9eg6bEAAMh4+ylFBxLK4bAUy0GCITpBACCd3Fk3XZL024ZW9fuChqcBkIoIQQCMS1GOW//46dUqzHFpb1O37vj/vax/28urLQAASJQhf0hH2/okScsJQYCEcTkjT48DbIIAQFpZXl2suWX5Gg6E9esDLabHAZCCCEEAjNvS6UV6/q9u0aqZJeobDuq//J939DdP79Wgn1dcAAAw2Q619CpsS2UFHlUUUYoOJErsJFaIThAASCuWZeljKyPbIL/Yw4s0AVyKEATAhNRMzdNP/3O9Nt42X5Yl/fTtM7rlv7+ov/35Pr16rIMzAgAATJILT2FZlnWVjwYwUbEQJBDm61gASDefWFktSXrlWIc6+n2GpwGQalymBwCQvtxOh76wfpFuXlCm//rTvWruHtKP32jUj99o1NR8j25ZUKaZU/M0vSRX1SW5KivwypatcFgKhsMKhGz1DQfUOxxQz2BAvcNBBUKR94fCYQXDtvI8TpUVeFVa4FVZvkdVJbmaOTVPTgffBAIAZId4Kfp0TmEBiRQ7h0UxOgCkn9ll+aqrKdHepm79cu9Z/ce1c0yPBCCFEIIAuGY3zi3Vjv/7Vu083ql/339OL+xvUdeAP2FrqB6XQ/OmFWhBeYEWVhRo3rQCzSsv0KzSPHldzoT8mgAAmLIvvglSZHgSILPFN0HYaAaAtPTxuuna29StZ/YQggC4GCEIgEnhdjr03oXT9N6F0/T1j9fqjZNd2numW2e7h9R8fkjN3UPqGvDLYVlyOSw5nZZcDocKc1wqznWrKMetwhyXctxOOR3Rj3FYGvAF1THgV0efTx39PjV3D2k4EFbDuV41nOu9aAaHJc2YkqfSAo9Kct2akudRUa5bTocl25Zs2bLt2MdasqzIY4pz3ZpVmq/ZpfmaVZanohy3gd9BAAAu5QuGdKQ1UopeSyk6kFDu6CZIkE4QAEhLH62r0jeeO6g9Td063TmgWaX5pkcCkCIIQQBMOpfTobXzy7R2ftmk/9yhsK3m80M60tqno239OtrWp+PtAzrR1q8+X1CNXYNq7Bq8pl+juiRXn75xpu6+YaZK8jyTNDkAAON3pKVfwbCtkjy3qktyTY8DZLTYudUg57AAIC2VF+Zo7fwyvXy0Q7/Yc1Z/9YEFpkcCkCIIQQCkFafD0szSPM0szdO6pRXx99u2rfY+n051Dur8oF/dg36dHwyoZygg25YsS7IU+THy8VLYjjyuc8Cv050DOtU5qPa+yLbJ379wWN/dfkx/snqGNqydrbnTCsx8wgCArBY7hbWcUnQg4VzOaAjCOSyMweOPP65/+Id/UEtLi+rq6vTd735XN9xww1Uf95Of/ESf+tSn9PGPf1zPPPNM4gcFsszHV1br5aMdemZPs/7L++fz9RMASYQgADKEZVkqL8pReVHONf08/b6gXtjfoh+8clIN53r1/75+Wj9647S++cnl+tQNMydpWgAAxmb/2UgIsoxSdCDhYp0gFKPjap566ilt2rRJW7du1Zo1a/Sd73xH69ev1+HDh1VeXn7Zx506dUpf+MIXdMsttyRxWiC7rF9WoS//3KET7QPa39yr5TP4GgqA5DA9AACkkgKvS3+yeoae/6ub9ePPrtH7Fk5T2Jb+9uf79PSuM6bHAwBkmQMXbIIASCyXI/L0OEAIgqt47LHHdN9992nDhg1aunSptm7dqry8PD3xxBOXfUwoFNKnP/1pffWrX9XcuXOTOC2QXQpz3Fq3JHI14hd7mg1PAyBVEIIAwCgsy9JN88v0ww3v0Z/Xz5JtS//303v5IgoAkDSBUFgNLbFS9CLD0wCZz805LIyB3+/Xrl27tG7duvj7HA6H1q1bp507d172cV/72tdUXl6uz3zmM2P6dXw+n3p7ey96AzA2H185XZL0zJ6zCvBnOgARggDAFVmWpf/2sWX61A0zZdvSpp/u1XPvnjM9FgAgCxxt7Zc/GFZhjkszp+aZHgfIeBSjYyw6OjoUCoVUUVFx0fsrKirU0tIy6mNeeeUV/eAHP9C2bdvG/Ots2bJFxcXF8beampprmhvIJrcuKldpvkcd/T79/nC76XEApABCEAC4Csuy9M1P1OpPVs9QKGzrwZ+8o5eO8IUUACCx9jfH+kCKKPUEksDljDw9DoYIQTB5+vr6dM8992jbtm0qKysb8+M2b96snp6e+FtTU1MCpwQyi8fl0CdXVUuSfvo2/9sBQDE6AIyJw2Hpv//xCgVCYf1iz1l947mDemH+e+Vw8E0pAEBixErR6QMBksMV3wThdA
our6ysTE6nU62trRe9v7W1VZWVlZd8/PHjx3Xq1Cndeeed8feFo/8dc7lcOnz4sObNm3fJ47xer7xe7yRPD2SPP72+Rv/0ykn97lCbOvp9Kivgf09ANmMTBADGyOmw9LWP16rA69KR1n79tqH16g8CAGCC9kU3QWoJQYCkYBMEY+HxeLR69Wpt3749/r5wOKzt27ervr7+ko9fvHix9u3bpz179sTfPvaxj+m2227Tnj17OHMFJMiiykLVzShWMGzrmXfo9gSyHSEIAIxDca5b99TPkiQ9vuO4bJsnyQCAyRcMhdVwLlKCyyYIkBxuNkEwRps2bdK2bdv05JNPqqGhQffff78GBga0YcMGSdK9996rzZs3S5JycnJUW1t70VtJSYkKCwtVW1srj8dj8lMBMtqfXh8JGX/6dhPP3YEsRwgCAOP0n9bOkdfl0N6mbr12vNP0OACADHS8fUDDgbAKvC7NLs03PQ6QFShGx1jddddd+ta3vqWHH35YK1eu1J49e/TCCy/Ey9IbGxt17tw5w1MCuLNuurwuh4609uvdMz2mxwFg0IRCkMcff1yzZ89WTk6O1qxZozfffHNMj/vJT34iy7L0iU98YiK/LACkhGmFXn3qhpmSpMdfPGZ4GgBAJoqdwlo6vYj+KSBJ3JzDwjhs3LhRp0+fls/n0xtvvKE1a9bE/92OHTv0wx/+8LKP/eEPf6hnnnkm8UMCWa44160P1Ua6eihIB7LbuEOQp556Sps2bdIjjzyi3bt3q66uTuvXr1dbW9sVH3fq1Cl94Qtf0C233DLhYQEgVdz33rlyOSy9drxTuxvPmx4HAJBh9jdTig4kG5sgAJB5/kP0JNaze89qOBAyPA0AU8Ydgjz22GO67777tGHDBi1dulRbt25VXl6ennjiics+JhQK6dOf/rS++tWvau7cudc0MACkguqSXH1yVbUk6XtsgwAAJtn+eCl6keFJgOzhckZDkBCdIACQKernlqq6JFd9w0H96kCL6XEAGDKuEMTv92vXrl1at27dyE/gcGjdunXauXPnZR/3ta99TeXl5frMZz4zpl/H5/Opt7f3ojcASDWfu3WeLEv6bUNbvLwWAIBrFQrbOnCWUnQg2dyO6DksNkEAIGM4HJb+9PoZkjiJBWSzcYUgHR0dCoVC8bKvmIqKCrW0jJ6mvvLKK/rBD36gbdu2jfnX2bJli4qLi+NvNTU14xkTAJJi3rQC3VFbJUn6/ksnDE8DAMgUJ9r7NRQIKc/j1JyyAtPjAFnDGd8EIQQBgEzyx9fNkGVJrx7r1KmOAdPjADBgQsXoY9XX16d77rlH27ZtU1lZ2Zgft3nzZvX09MTfmppIagGkpv908xxJ0m8PtirA6QQAwCTYfzZail5VFO8oAJB47ngnCF/TAUAmqZmap/cumCZJ+vGbjYanAWCCazwfXFZWJqfTqdbW1ove39raqsrKyks+/vjx4zp16pTuvPPO+PvC0S8oXS6XDh8+rHnz5l3yOK/XK6/XO57RAMCIVTUlmprvUdeAX7tOn9eNc0tNjwQASHP7zkROYdVyCgtIKpeTc1gAkKn+7MZZ+v2Rdv3r203a9MGFynE7TY8EIInGtQni8Xi0evVqbd++Pf6+cDis7du3q76+/pKPX7x4sfbt26c9e/bE3z72sY/ptttu0549ezhzBSDtORyW3rcw8oqSHYfbDU8DAMgEI6XohCBAMrkcFKMDQKZ6/+JyTS/O0fnBgP59/znT4wBIsnGfw9q0aZO2bdumJ598Ug0NDbr//vs1MDCgDRs2SJLuvfdebd68WZKUk5Oj2trai95KSkpUWFio2tpaeTyeyf1sAMCAWxfFQpA2w5MAANJdOGzrQPQcFqXoQHK5op0gATpBACDjOB2WPnXDTEnSv7zOSSwg24zrHJYk3XXXXWpvb9fDDz+slpYWrVy5Ui+88EK8LL2xsVEOR0KrRgAgpdyyYJosSzrU0qdzPUOqKs41PRIAIE2d7BzQgD+kHLdD86blmx4HyCrO6PPYEOewACAj3XVDjf6f7Ue16/R5NZzr1ZKqItMjAUiSCaUVGzdu1OnTp+Xz+fTGG29ozZo18X+3Y8cO/fCHP7zsY3/4wx/qmWeemcgvCwApaWq+R3UzSiRJv+ckFgDgGsROYS2pKor3EwBIDorRASCzlRfmaP2ySKfxv7x+2vA0AJKJZ1YAMAlGTmIRggAAJi4WgnAKC0i+eDE657AAIGN9+sbISaxn3mlWvy9oeBoAyUIIAgCT4LZF5ZKkV491KECZJgBggvZRig4YEy9G5xwWAGSs+rmlmjctXwP+kH7+TrPpcQAkCSEIAEyC5dXFKs33qM8X1K7T502PAwBIQ+GwrQPNvZKk2umEIECyjRSj84IWAMhUlmXpz26cJUn60eunZdsE30A2IAQBgEngcFh670JOYgEAJq6xa1B9vqA8LocWVBSYHgfIOrFNEIrRASCz/dF1M5TrdupQS5/ePNllehwASUAIAgCTZKQXpM3wJACAdLTvglJ0N6XoQNLFOkECdIIAQEYrznXrk9dVS5J+8MpJw9MASAaeXQHAJLllwTRZlnSopU/neoZMjwMASDOxUvTa6UWGJwGy08gmCOewACDT/ae1syVJv2lo1enOAbPDAEg4QhAAmCRT8z2qm1EiSfo9J7EAAOMU2wRZTik6YESsE4RidADIfPPLC/W+hdNk29L/fvWU6XEAJBghCABMotsWlUuiFwQAMD62bY9sghCCAEa4HLFzWGyCAEA2+MzNcyRJ//p2k3qHA4anAZBIhCAAMIlivSCvHuugVBMAMGaNXYPqHQ7K43RoYUWh6XGArEQxOgBkl1sWlGlhRYEG/CH99K0m0+MASCBCEACYRLXVxcrzONXnC+pYW7/pcQAAaSJ2CmtxVaE8Lr5EB0ygGB0AsotlWfpPayPbIP/71VMKsgkIZCyeYQHAJHI6rHgvyDuN580OAwBIG/ubeyVxCgswKdYJwiYIAGSPT6yq1tR8j5q7h/Trg62mxwGQIIQgADDJrptVIknaTQgCABij/ZSiA8bFzmHRCQIA2SPH7dSn18yUJP3glZOGpwGQKIQgADDJVtVMkSS909htdhAAQFqwbTt+DosQBDAnVoweZBMEALLKPTfOkttpadfp81x0ADIUIQgATLKVM0skSUfb+tUzFDA7DAAg5Z05P6SeoQCl6IBhnMMCgOxUXpSjj6+sliR9b8dxw9MASARCEACYZGUFXs2cmidJevdMt9lhAAApL7YFsqiSUnTAJM5hAUD2uv/WebIs6TcHW9Vwrtf0OAAmGc+yACABVkW3QTiJBQC4mlgIQik6YJbbGXl6zCYIAGSfedMK9JHlVZKk//niMcPTAJhshCAAkACrakokUY4OALg6StGB1OCMb4IQggBANtr4/vmSpOf3ndOxtj7D0wCYTIQgAJAAq2aOlKPbNk+kAQCju7AUvba6yPA0QHZzRztBgmHOYQFANlpcWaTbl1bItqXvvUg3CJBJCEEAIAGWVBXJ63KoZyigkx0DpscBAKSoM+eH1D0Yk
NtpaVElpeiASU5H9BwWmyAAkLVi2yC/2HtWpzt5Lg9kCkIQAEgAj8sRP2tCLwgA4HJip7AWVhTK63IangbIbvFidDZBACBrrZhRovctnKZQ2NY/7mAbBMgUhCAAkCDxcvQmekEAAKPbRx8IkDIoRgcASNJffSCyDfL/7T6j5u4hw9MAmAyEIACQIBf2ggAAMJqRPhBCEMC0C4vR6XQDgOy1etZU1c8tVSBk67vbj5oeB8AkIAQBgASJbYIcaunToD9odhgAQMqxbTt+DotNEMC8WDG6xDYIAGS7L6xfKEl66u0mHTjbY3gaANeKEAQAEqSqOFeVRTkKhW29e4YvmgAAF2vuHtL5wYBcDkrRgVQQ2wSRpCAhCABktdWzpuqjK6pk29LXf3mQDUEgzRGCAEACxXtBOIkFAPgDF5ai57gpRQdMi3WCSIQgAADpSx9eLI/LoddPdOnXB1tNjwPgGhCCAEACjYQglKMDAC5GKTqQWlwXbIKEQoQgAJDtZkzJ01/cMleS9OjzDfIFQ4YnAjBRhCAAkEDxcvSmbtZnAQAX2dfcK0mqnUEIAqSCC89hBcJhg5MAAFLF/bfO07RCr053DurJ106ZHgfABBGCAEACLa8ulsthqb3PpzPnh0yPAwBIEZSiA6nHsqz4NkiQTRAAgKR8r0t/s36RJOm724+po99neCIAE0EIAgAJlON2aklVkSRRjg4AiGvuHlLXgF8uh6XFlKIDKSO2DRJkEwQAEPXH181QbXWR+nxBPfpcg+lxAEwAIQgAJNiK6JmTvWe6zQ4CAEgZlKIDqSlWjs4mCAAgxuGw9NWPLZPDkn72TrN+8maj6ZEAjBMhCAAkWF1NiSRpT1O30TkAAKkjVoq+gj4QIKW4nLFNEEIQAMCI1bOm6r/eHjmL9fCzB7SPSw9AWiEEAYAEWxkNQfad6VEwxGkFAMDIicRa+kCAlOLiHBYA4DLuf988rVtSIX8wrM/9yy6dH/CbHgnAGBGCAECCzZtWoHyPU0OBkI6195seBwBgGKXoQOpyOTiHBQAYncNh6dv/oU6zSvPU3D2kB5/aoxCbg0BaIAQBgARzOiwtj/WCcBILALLemfNDOj8YkNtpaXEVpehAKhkpRuebWgCASxXnurX1z1Yrx+3QS0fa9fcvHFKYvzOAlEcIAgBJMNILwt1QAMh2F5aie12UogOpxB3rBOGEKQDgMpZUFembn1guSfpfL53QZ558S12cxgJSGiEIACTByhklkqR3z3QbnQMAYB6l6EDqcjkjT5EDnMMCAFzBH6+eoW9+slYel0MvHm7Xh/+fl/T6iU7TYwG4DEIQAEiC2CbIoZY+DQdCZocBABgVC0EoRQdST6wYnRvvAICr+fSaWXrmL9dq7rR8tfb6dPe21/Xo8w1qONcr2+bvESCVuEwPAADZoKo4R9MKvWrv8+nA2R6tnjXV9EgAAANs2x7ZBKkuMTsMgEu4ouewAmHOYQEArm7p9CL98r/crId/cUBP7zqj7790Qt9/6YSqS3K1bkm51s4v07RCr0ryPCrJdaswxyWnw5JlWaZHB7IKIQgAJIFlWaqbUaLfNrRqTxMhCABkqzPnh9QdLUVfWFlgehwAf8DpiBxLCHEOCwAwRnkel771p3Vat6RCT+9q0stHO9TcPaQnd57WkztPX/XxLocl5wVveR6nCrwuFeZEQpOiHLeKcl0qynWrONetQq9LXrdTXpdDXpdTbqcl25ZCti3bthUKS8FwWP5gWIGQrWA4rGDIVti2FQrbCv3BloolS5YlOayR/2xZlv4wpok9ajxbLmHblm1HHhu2bYXDkflC4bAuXLqMZUJ/+GuGbFvBkK1g2FYwFI7PYCkyo8Oy5HJacjmib06HnI7o+x2WHA5LdnSGsG3LVmTbMxAKR3+0L/l8/vDzdFhW5Pc6+nue43bGf26nFfn/WdiOzBgKR370BUIa8oc0FIi81c0o0SdWVY/59w2TjxAEAJJkZU2xftvQqr1N3aZHAQAYEtsCWVRJKTqQitzRc1hBNkEAAOP0odpKfai2UkP+kF491qHfNrRqX3OPeoYC6hkMqM8XHPVxweg3zmP6hoNqlS9ZYyNJVs0s0azSfNNjZC1CEABIklgvyF7K0QEga717JhKCLOcUFpCS4uew2AQBAExQrsepdUsrtG5pxUXvD4TC6h8OKhzdMLCl+IZCMGwrFLIVCIc15A+pbziovuFA/MeeoaB6hgLqHQ6ofzgofygsXzAkXyCsQCgc3YqIbC04LEtulyWXwyG30yG38+JNE6dlxTcvYksQYVuyNbIx8YfVWLZtx7dDLGtkE+NKLnyMorPFNihc0W2NCz92tL95HZYVnd8RfczI75sU2RQJhWPbIpHNl3B02yW2lRH7fbGimy6x34/IBo5Dzgsas2Of04WfWShsyxeM/H4PByI/xrZZQrYUDtvxzyn2+5vjdijX41SO26mXj3boWFu/ntt3Tn956/wr/p4hcQhBACBJYrffT3cO6vyAX1PyPWYHAtLY448/rn/4h39QS0uL6urq9N3vflc33HDDqB+7bds2/fM//7P2798vSVq9erUeffTRy348kEj7m2MhCKXoQCpyxc5hUYwOAJhkbqeD7wNkof/zZqM2/2yffrmXEMQkx9U/BAAwGYrz3JpTFll9ZBsEmLinnnpKmzZt0iOPPKLdu3errq5O69evV1tb26gfv2PHDn3qU5/Siy++qJ07d6qmpka33367mpubkzw5st1FpegzCEGAVDSyCcI5LAAAcO3WL6uU02Hp4LlenWjvNz1O1iIEAYAkqot+0yt2DgXA+D322GO67777tGHDBi1dulRbt25VXl6ennjiiVE//kc/+pH+8i//UitXrtTixYv1T//0TwqHw9q+fXuSJ0e2a+oaUs9QQB6nQwsrCk2PA2AUrmgnCJsgAABgMkzN92jt/DJJ0nPvnjM8TfYiBAGAJIr3glCODkyI3+/Xrl27tG7duvj7HA6H1q1bp507d47p5xgcHFQgENDUqVMv+zE+n0+9vb0XvQHX6sJSdI+LL8OBVBQ7hxUgBAEAAJPko8urJEnP7SMEMYVnXwCQRBeWo9s2T66B8ero6FAoFFJFxcUlgxUVFWppaRnTz/HFL35R06dPvyhI+UNbtmxRcXFx/K2mpuaa5gYk6d3mbknSck5hASkrdg4ryDksAAAwSdYvq5TbaelQS5+OtfWZHicrEYIAQBItrSqSy2Gpo9+v5u4h0+MAWefv/u7v9JOf/EQ///nPlZOTc9mP27x5s3p6euJvTU1NSZwSmYpSdCD1cQ4LAABMtuI8t26OnsT6JSexjCAEAYAkynE7taSqSJK0u7Hb7DBAGiorK5PT6VRra+tF729tbVVlZeUVH/utb31Lf/d3f6df//rXWrFixRU/1uv1qqio6KI34FrYtq19ZwhBgFTnckbPYYUIQQAAwOT56IrpkiIhCJdBko8QBACS7IY5kR6Cncc7DU8CpB+Px6PVq1dfVGoeKzmvr6+/7OP+/u//Xl//+tf1wgsv6Prrr0/GqMBFGrsG1Tsc
lMdFKTqQykY2QTiHBQAAJs8Hl1XI43ToWFu/jrT2mx4n6xCCAECS3TSvVJK083iH4UmA9LRp0yZt27ZNTz75pBoaGnT//fdrYGBAGzZskCTde++92rx5c/zj//t//+966KGH9MQTT2j27NlqaWlRS0uL+vv5whPJsze6BbKkqohSdCCFxTpB2AQBAACTqSjHrfcunCZJ+uW7Zw1Pk314BgYASXbDnKlyOiyd6hykFwSYgLvuukvf+ta39PDDD2vlypXas2ePXnjhhXhZemNjo86dG7mz+o//+I/y+/36kz/5E1VVVcXfvvWtb5n6FJCF9p3pliSt4BQWkNJcjshT5CCbIAAAYJJ9dEWVJOk5TmIlncv0AACQbQpz3Foxo1jvNHZr5/FO/cnqGaZHAtLOxo0btXHjxlH/3Y4dOy7651OnTiV+IOAq3o31gcwgBAFSWewcVpBidAAAMMnWLa2Qx+XQiY4BHW3r50xuErEJAgAGxE5ivXaMk1gAkOnCYVv7myMhyApCECClxYrRg5zDAgAAk6zA69KSqiJJ0smOAcPTZBdCEAAw4KZ5ZZKk1453sgIJABnuREe/Bvwh5bqdmj+twPQ4AK4g1tnjC4YMTwIAADJReaFXktTW5zM8SXYhBAEAA1bPmiKPy6GW3mHSfwDIcLFTWMumF8VfZQ4gNeV5nJKkQT8hCAAAmHyxEKSdECSpeBYGAAbkuJ1aPXOKJOnV452GpwEAJBJ9IED6iIUgQ4QgAAAgAcoLcyRJ7X3DhifJLoQgAGBIrBdk53F6QQAgk+2jDwRIG7luNkEA/P/bu/Moqesz3+OfqupauuidphsaGlpQJKjQAgHRMEanR2Z0jM4ZbziaIMM1ZlFy54S5GSUa2oQZIV7H8Z6EyARDzLlDgjE3OolyMEoguSiRhE0joIEGmsVe6X2t5Xf/qK5CNqWb31L16/frnDpK+avi6a809XQ99TwPAFhnVHIcVjudIHaiCAIADrn+8mQRpFnxOHtBAMCNorG43j2ZLIIUOBsMgI8VDmRJohMEAABYIzUOq5MiiJ0oggCAQ6aNK9CIgE8t3REdqOtwOhwAgAX+3NCp3khcOcEsXTZyhNPhAPgYqZ0gkajDkQAAADcqyaMTxAkUQQDAIX6fV7MvK5IkvclILABwpXcG9oFcPTZPXq/H4WgAfBwWowMAACslx2E1dfYxFcRGFEEAwEHXTyqWJL3JcnQAcKW3T7RKYhQWkCkYhwUAAKxUnBOUxyNF44ZauvudDmfYoAgCAA6aO7Ac/a2aZkVicYejAQCY7e2BTpBrxrIUHcgE2XSCAAAAC/l9XhWFA5Kkhg5GYtmFIggAOGjqmDwVhP3q6o+l3igDALhDXzSm/R+0S5Km0wkCZITkOCw6QQAAgFWSI7EogtiHIggAOMjr9WjuxEQ3yJYDDQ5HAwAw0/t1nYrEDOVn+1VelO10OAAuQrII0h+LK0qXLgAAsECyCNJIEcQ2FEEAwGG3TRsjSVr/1lF190cdjgYAYJbT+0Dy5fGwFB3IBMlxWJLUHaEbBAAAmO90J0ivw5EMHxRBAMBhf3P1GE0YGVZLd0TP/+GY0+EAAEzy9jH2gQCZJuDzyudNFC0ZiQUAAKxQkhuSJDW00wliF4ogAOAwn9ejL/7FREnSs//vMAvSAcAl3j6RKIJMYx8IkDE8Ho/C/kQ3SFcfHboAAMB8JclxWJ0UQexCEQQA0sDfzxin4pygTrT26Fd7TzodDgDgEvVGYnq/vkNSYhwWgMyRHInVTScIAACwQGonCJ0gtqEIAgBpIOT36b9/qkKStOa3hxSPG84GBAC4JO+ebFcsbqg4J6Ax+SGnwwEwCMnl6D3sBAEAABagE8R+FEEAIE18/roJyg1m6f36Tv3mQIPT4QAALsHeY62SpOnjCliKDmSY7ECWJDpBAACANUrykjtBWIxuF4ogAJAm8kJ+3XPdeEnSM7895HA0AIBL8fbxVknsAwEyUaoTpJ+dIAAAwHzJcVhd/TF2kNmEIggApJH7brhMAZ9XO4+26A9HTjkdDgBgiPYeTyxFn17OPhAg04TZCQIAACyUE8xK5RuNHYzEskOW0wEAAE4ryQvp72eO00931Oprz+/R9z83w9RPEcfihho6enW8pUcnWnrU1R9VNGYoEosrGjfkkeTzepTl9cjn8yro8yoc9GlEIPECHQ5kKeT3KpjlU9Dvld/nVTQWV180cYvE4gpkeZUTzNKIYJbCfp+8XsbAABhe2rojOtzUJSkxDgtAZsn2UwQBAADWKskN6khztxo6+lRRPMLpcFyPIggApJn/8ZeXa9vBRh071aO7ntmuR//2E1p43YRBz5Tvj8a1/4N27a5t0e5jrXr7eJuOt3QrErNv6brHI90ytVRrPj+TmfgAho23T7RKksYXhVU4IuBsMAAG7fQ4LIogAADAGqNSRRD2gtiBIggApJkx+dl6+avz9PUX9urX++q1/L/e1VuHT+nxO69Rfth/wce1dUe0s/aU/nCkRTuPtGjv8Vb1RePnXOfzelRWENK4grByQ1ny+7yJ7g9fokgRixuKxg3FYob6ojF19yduXf1RdffF1BeNqS8aV28kpriRKHQEs7wK+LwKZHnVF42rqy+quCEZhvTqu/V69d16/fXVoy07MwBIJ6ml6OUFjsYBYGhYjA4AAKxWkptYjs44LHtQBAGANJSf7dd/LJypdW8c0cqN+/XK2x/olbc/0NiCbE0qydGkUSMU8vt0oqVHJ1oTo63q2s/99EBB2K9rywt07fhCVZYXaFJJjkpzg8rymbMSKhY35PXonC4PwzDUG4nrf2/+s9b89pCeePWAqj5RYtrvCwDpLLUPZBz7QIBMlNoJEmFRKQAAsEZyOXoDRRBbUAQBgDTl8Xh036cu07XjC/Q/X9irmsauRMGjtUe/e7/xvI+ZWDxCsyoKNWtCkWZWFGpi8QhLx1D5LrDvw+PxKDvg0wM3TdLzf6hVTWOXXth5XHfPHm9ZLACQDgzD0B46QYCMxjgsAABgtVQRpJ0iiB0oggBAmpsxvlC/+adPq6WrX4caO3WwIXGLxOIqK8jWuMKwxhZma0Iazp7PC/m15OYrtOLlfXr69fd1Z+VYZQ+8sQAAblTX3qvGjj75vB5dVZbndDgAhiCZqzAOCwAAWKVkoAjS2EkRxA4UQQAgQxSOCGjWiCLNqihyOpRB+fx147Vu22GdaO3Rj948rAc+fbnTIQGAZfYeS4zCuqIkR+EAqTaQicJ+OkEAAIC1SvISO0EazjPaHOZjODsAwFLBLJ/+6ZbJkqRnth5Sa3e/wxEBgHX2Hm+VJFUyCgvIWOHUYnR2ggAAAGuMyhnoBGEniC0oggAALHdH5VhNGZ2rjt6ovr/1kNPhAIBl3h4ogrAPBMhcjMMCAABWK8lLFEFOdfcrEos7HI37UQQBAFjO5/Xoob+ZIkl67s0jauuOOBwRAJgvHjf09sA4rGnj8h2OBsBQpRajRyiCAAAAaxSFA/J5PTIMqbmTiRlWowgCALDFpyePUll+SP3RuP7c0OF0OABgupqmLnX
0RRXyezW5NNfpcAAMEZ0gAADAal6vR8U5AUmMxLIDRRAAgC08Ho8qikdIko42dzscDQCYLzkK66qyfPl9pNlApkruBGExOgAAsFJJ7sBy9A6Wo1uNn84AALaZMDIsSTra3OVwJABgvr3HWiVJ08cVOBoHgEsTTnWCsBgdAABYZ1RuYi9IA50glqMIAgCwzYSRA50gp+gEAeA+e48n9oFML2cfCJDJsv2MwwIAANYrGSiCMA7LehRBAAC2mVCU6AQ5wjgsAC7TH41r38l2SXSCAJku2QnSF40rFjccjgYAALhVSaoThHFYVqMIAgCwTbITpJZxWABc5kBdu/pjceVn+1Oj/wBkpuROEEnqidANAgAArJEah9VOJ4jVKIIAAGwzfuCNwZbuiNp6Ig5HAwDm2TOwD2TauHx5PB5ngwFwSUJ+r5LfxuwFAQAAVhk1sBi9sZMiiNUoggAAbJMTzFJxTkCSVMtILAAusru2VZI0Y3yhs4EAuGQejye1F6SHvSAAAMAiJXl0gtiFIggAwFanl6MzEguAe+yqbZEkzZhAEQRwg+ReEJajAwAAq4zKOb0Y3TDYQ2YliiAAAFsll6MfpRMEgEs0d/al/k6rZCk64ArZFEEAAIDFkjtB+mNxtfcwgtNKFEEAALZKdYKwHB2ASyT3gUwaNUL5Yb+zwQAwRdifWI7OOCwAAGCVkN+n/OzEzw8NHb0OR+NuFEEAALaaMJJOEADukhqFxT4QwDVOd4LwqUwAAGCdZDdIQwd7QaxEEQQAYKvxFEEAuExyKfq1FEEA10juBOmJ0AkCAACsU5J7ei8IrEMRBABgq4qBcVh17b3q5Y0FABkuFje0d2Ac1owJBY7GAsA8LEbHx1m9erUqKioUCoU0Z84c7dix44LXrl27VvPmzVNhYaEKCwtVVVX1kdcDAIaP050gjMOyEkUQAICtCsN+5QYTc7ZrT9ENAiCzvV/foa7+mEYEfLqiJNfpcACYJDuQyFUoguB8nn/+eS1dulTV1dXatWuXpk+frvnz56uhoeG812/dulV33323tmzZou3bt6u8vFy33HKLTpw4YXPkAIB0U5yTKII0d/Y7HIm7UQQBANjK4/FoQjEjsQC4Q3IU1vTyAvm8HmeDAWCasH9gHBY7QXAeTz31lO6//34tXrxYU6dO1Zo1axQOh7Vu3brzXr9+/Xo98MADqqys1JQpU/Tss88qHo9r8+bNNkcOAEg3ySJIYyfjsKxEEQQAYLsJRYmRWEebuxyOBAAuDUvRAXdKLkbvohMEZ+nv79fOnTtVVVWVus/r9aqqqkrbt2+/qOfo7u5WJBJRUVGRVWECADJEcU5AktREJ4ilspwOAAAw/ExgOToAl9g9UAS5dnyBs4EAMFVqMTpFEJylqalJsVhMpaWlZ9xfWlqqAwcOXNRzPPTQQyorKzujkHK2vr4+9fWd/lRwe3v70AIGAKS1ZCdIE4vRLUUnCADAdqkiCDtBAGSw1u5+HWpMdLRdSycI4CqnF6MzDgvmWrVqlTZs2KAXX3xRoVDogtetXLlS+fn5qVt5ebmNUQIA7JLaCdJFEcRKFEEAALYbzzgsAC6w51irJKliZFhFIwLOBgPAVCxGx4UUFxfL5/Opvr7+jPvr6+s1evToj3zsk08+qVWrVunXv/61pk2b9pHXLlu2TG1tbanbsWPHLjl2AED6Kc5N/BzR3NmveNxwOBr3oggCALBdxcBi9BMtPYrG4g5HAwBDk1yKzj4QwH0Yh4ULCQQCmjlz5hlLzZNLzufOnXvBxz3xxBNasWKFNm3apFmzZn3s7xMMBpWXl3fGDQDgPiNHJDpBonFDbT0Rh6NxL4ogAADbleaGFMjyKho3dLK11+lwAGBIdrEPBHCt0+OwKILgXEuXLtXatWv14x//WPv379dXvvIVdXV1afHixZKke++9V8uWLUtd/53vfEff/OY3tW7dOlVUVKiurk51dXXq7Ox06ksAAKSJQJZX+dl+SVJTJyOxrMJidACA7bxejyYUhfXnhk4dae7S+IEdIQCQKeJxIzUOi30ggPtk+weKIBGKIDjXggUL1NjYqOXLl6uurk6VlZXatGlTall6bW2tvN7Tnzl95pln1N/fr7vuuuuM56murtZjjz1mZ+gAgDQ0Miegtp6Imjr7dUWp09G4E0UQAIAjJoxMFEFYjg4gEx1q7FRHb1Qhv1dTRuc6HQ4Ak4UHdoL0sBgdF7BkyRItWbLkvP9t69atZ/z6yJEj1gcEAMhYxTlB1TR20QliIcZhAQAckVqO3sRydACZJ7kPZNq4AmX5SKkBtwkHGYcFAADsMSonsReEIoh1+IkNAOCI5HJ0OkEAZKI/Hj0liaXogFuxGB0AANilOCcgiSKIlSiCAAAcMb5ooAjSTCcIgMzz1uFEEWTOZUUORwLACmF/YhwWnSAAAMBqI5OdIB39DkfiXkMqgqxevVoVFRUKhUKaM2eOduzYccFr165dq3nz5qmwsFCFhYWqqqr6yOsBAMNDxcjEOKzaU90yDMPhaADg4tW19epoc7e8HmlmBZ0ggBtlJztBIjHF4+QpAADAOsUDRZDmLjpBrDLoIsjzzz+vpUuXqrq6Wrt27dL06dM1f/58NTQ0nPf6rVu36u6779aWLVu0fft2lZeX65ZbbtGJEycuOXgAQOYaW5gtn9ej3khcDR280APIHDuOJLpAppblKS/kdzgaAFZIjsOSpN4o3SAAAMA6yXFYjZ10glhl0EWQp556Svfff78WL16sqVOnas2aNQqHw1q3bt15r1+/fr0eeOABVVZWasqUKXr22WcVj8e1efPmSw4eAJC5/D6vxhZkS5KOsBwdQAbZcbhZkjS7YqTDkQCwSrb/dBGEkVgAAMBKxbnJcVh8QNQqgyqC9Pf3a+fOnaqqqjr9BF6vqqqqtH379ot6ju7ubkUiERUVXXh+cl9fn9rb28+4AQDcJ7kX5FhLj8ORAMDF2zGwD2Q2+0AA1/J6PQr5Ez8usxwdAABYqXjEQBGks49x4RYZVBGkqalJsVhMpaWlZ9xfWlqqurq6i3qOhx56SGVlZWcUUs62cuVK5efnp27l5eWDCRMAkCHGFSY6QY63dDscCQBcnFNd/Xq/vlMSRRDA7cIBlqMDAADrFecmxmH1RePqIu+wxJAWow/VqlWrtGHDBr344osKhUIXvG7ZsmVqa2tL3Y4dO2ZjlAAAuyTHYZ2gEwRAhkh2gUwuzVHRiIDD0QCwUnIkVnd/1OFIAACAm4UDWal9ZIzEskbWYC4uLi6Wz+dTfX39GffX19dr9OjRH/nYJ598UqtWrdLrr7+uadOmfeS1wWBQwWBwMKEBADLQuKJkJwhFEACZgVFYwPCRfDOCcVgAAMBqxTlB1Z7qVlNnnyqKRzgdjusMqhMkEAho5syZZyw1Ty45nzt37gUf98QTT2jFihXatGmTZs2aNfRoAQCuMq4wsRPkeCvjsABkhh1HBpaiX8ZSdMDtkkUQxmEBAACrFeckusybOukEscKgOkEkaenSpVq0aJFmzZql2bNn6+mnn1ZXV5cWL14sSb
r33ns1duxYrVy5UpL0ne98R8uXL9dPfvITVVRUpHaH5OTkKCcnx8QvBQCQaZLjsD5o7VUsbsjn9TgcEQBcWHtvRPtOtkuSZlfQCQK4XXayCBKhCAIAAKw1MicxFamxs9/hSNxp0EWQBQsWqLGxUcuXL1ddXZ0qKyu1adOm1LL02tpaeb2nG0yeeeYZ9ff366677jrjeaqrq/XYY49dWvQAgIxWmhdSltejaNxQfXuvygaKIgCQjnYebVHckCaMDGt0/oX32wFwh+Ri9B52ggAAAIsVDxRBmukEscSgiyCStGTJEi1ZsuS8/23r1q1n/PrIkSND+S0AAMOAz+tRWUG2ak9163hLD0UQAGntrZrEPpA57AMBhoVsxmEBAACbjGIclqUGtRMEAACzJUdinWAvCIA0t+Mw+0CA4STspwgCAADsUZyb6ARp6mAclhUoggAAHDWuMFEEOX6qx+FIAODCevpjevt4myQ6QYDhIrkYvYciCAAAsNjIEQNFEDpBLEERBADgqLHJIkgLRRAA6Wt3bYuicUNj8kOp4i0Ad8se2AlCJwgAALBa8cA4rOYuOkGsQBEEAOCocYVhSdKJVoogANLXW4dP7wPxeDwORwPADqlOkAiL0QEAgLVOj8OiE8QKFEEAAI5KjcNqYScIgPT15qEmSewDAYaTMIvRAQCATYpzEkWQjr6oeiPkHmajCAIAcFRyMfrJ1l7F44bD0QDAuVq7+7XzaIsk6cYrRzkcDQC7ZFMEAQAANskLZSngS7xVz14Q81EEAQA4akx+SD6vR/2xuBp5oQeQhn77fqPihnRlaW6qcAvA/ViMDgAA7OLxeDRyYC9IUyd7QcxGEQQA4Kgsn1ej80KSGIkFID1tOdAgSbppSonDkQCwU7Y/uRidnSAAAMB6yZFYzXxA1HQUQQAAjhub2gvCcnQA6SUWN/Tb9xslSTdTBAGGFXaCAAAAOxWnOkEogpiNIggAwHHjKIIASFN7jrWopTui/Gy/ZowvcDocADaiCAIAAOyU7ARhHJb5KIIAABw3rjAsiSIIgPSzeX9iFNZfTB6lLB+pMzCcsBgdAADYaeRAEaSxg04Qs/GTHADAceMGFg2faKUIAiC9/GZgH8jNU0Y5HAkAu4UDiZ0gPewEAQAANkiOw2ruohPEbBRBAACOOz0Oi8XoANLHydYeHajrkMcj3TiZfSDAcJMahxWJyTAMh6MBAABuNyp3YBwWnSCmowgCAHBccjH6iZYe3mQAkDa2vJfoArm2vEBFIwIORwPAbslxWIYh9UXjDkcDAADc7vROEIogZqMIAgBw3Jj8bHk8iTcYWAAGIF1sSY3CogsEGI7Cfl/q39kLAgAArDZyYBwWRRDzUQQBADgukOXV6LyQJEZiAUgPvZGY3jjYLEm6iSIIMCxl+bwK+BI/MnezFwQAAFgs2QnS0h1RJEYXqpkoggAA0sLYguReEJajA3De72ua1ROJaXReSFPH5DkdDgCHJEdi9dAJAgAALFYYDsjrSfx7C8vRTUURBACQFpLL0U+0UgQB4LzkKKybpoySx+NxOBoATkktR6cIAgAALObzelQ0ItEN0shILFNRBAEApIVxhWFJjMMC4LxY3NBr++olSTddySgsYDjLpggCAABsVJzaC0IniJkoggAA0sLYQsZhAUgPW99r0Mm2XhWE/fqLyaOcDgeAg5KdID0RdoIAAADrJfeCNHXQCWImiiAAgLSQGodFEQSAw/7P749Kkj47q1whv8/haAA4KezPkkQnCAAAsEeyE6S5iyKImSiCAADSwulxWD0yDMPhaAAMV0ebu/Tb9xslSZ+bM97haAA4jXFYAADATqlOEMZhmYoiCAAgLYzJD0mSeiIxtXRHHI4GwHC1/q1aGYZ04+RRmjByhNPhAHBYahwWRRAAAGCD0QPvjdQ0djkcibtQBAEApIWQ36eS3MQnHliODsAJvZGYfvbHY5Kke+dOcDgaAOmAThAAAGCn6yaOlCRtP9Skvij5h1koggAA0gbL0QE46Vd7T6q1O6KxBdn69JUlTocDIA2c7gRhMToAALDe1DF5GpUbVFd/TH880uJ0OK5BEQQAkDaSe0FqT9EJAsB+/zmwEP1z142Xz+txOBoA6SAcYDE6AACwj9fr0acnj5IkbTnQ4HA07kERBACQNqaNzZckvbav3uFIAAw3e4+1au/xNgV8Xi2YVe50OADSRLZ/YBxWhCIIAACwx01TEl3pW96jCGIWiiAAgLRxR2WZfF6Pdh5tUU1jp9PhABhGkl0gt00bo5E5QYejAZAuWIwOAADs9qkriuXzenSosUu1zUzKMANFEABA2ijJC+nGgbbPn+887nA0AIaLgw0d+q+9JyVJn7+OhegATgunFqOzEwQAANgjL+TXrAmFkqSt79MNYgaKIACAtHLXzHGSpF/sOqFY3HA4GgBu19UX1Zf/c5f6o3HNu6JYM8YXOB0SgDSSPbATpKuPThAAAGCf1Egs9oKYgiIIACCt/OUnSlQQ9quuvVfbDjY5HQ4AFzMMQ4+8+I4ONnSqNC+of19QKY+HhegATrusOCxJeutws442dzkcDQAAGC5uujJRBHnzULN62U12ySiCAADSSjDLpzuml0liJBYAa/1kR61e2nNSPq9H37tnhorZBQLgLDPGF2reFcWKxAyt3HjA6XAAAMAwMbk0R2X5IfVF4/p9TbPT4WQ8iiAAgLRz18xySdKr79aprTvicDQA3Oid42361i/3SZIe+usr9cmKIocjApCOPB6PHr1tqrweadO7dbwJAQAAbOHxePTpgZFYW99rdDiazEcRBACQdq4em6cpo3PVH43rV2+fdDocAC7zpxNt+sr6neqPxfVXU0t1/7yJTocEII1dOTpXd88eL0la8fI+dpYBAABbJEdi/eZAgwyD/ONSUAQBAKQdj8eTWpD+AiOxcB6rV69WRUWFQqGQ5syZox07dnzk9S+88IKmTJmiUCika665Rhs3brQpUqSThvZeff2Fvbr9e9t0vKVH5UXZevK/TWcPCICPtfSvJis3mKV3T7br/+4iNwEAANa7ftJIBXxe1Z7q1uEmdpNdCoogAIC0dEflWPm8Hu091qo/13c4HQ7SyPPPP6+lS5equrpau3bt0vTp0zV//nw1NDSc9/o333xTd999t+677z7t3r1bd955p+6880796U9/sjlyOCEeN3SwoUPf3fxnffrJrXph53EZhnRHZZle+NL1ys/2Ox0igAwwMieor/7l5ZKk//Xqe+rqizocEQAAcLsRwSzNmZgY27uFkViXxGNkQC9Ne3u78vPz1dbWpry8PKfDAQDY5As//qNe31+vicUjdMPlxZo2Ll+V5QUqyQvJ5/XI5/HI65W8Ho/ihiHDUOKmc1/aPDrzk94f/uC3na+E2QGfac81XF8f58yZo09+8pP63ve+J0mKx+MqLy/XV7/6VT388MPnXL9gwQJ1dXXp5ZdfTt133XXXqbKyUmvWrLmo39Pss65r61VTZ98lP0+mSX6vJb9HP+p7z+NJfN96PInv8eT3utfjkc+b+I6OG4YSU
2kM9UXjauuJqK07oraeiOrae7W7tlW7a1vU3nv6zcrK8gItv32qZowvtOzrBOBOfdGYbvn33+loc7dun16mW6aWalxhtsYWZis36FdnX1SdfVF19UXVG4mlHpfIOT7099nAP51y9dh8055ruOYiTuCsAWB4+uG2w1rx8j4Fs7z6ZEWR5lxWpOsmjdTE4hHK8nmV5U38fOTznn5fJPnPs52dfpz9PoldQn6vad34F/v6mGXK7wYAgAUWXT9Bmw/Uq6apSzUuaP0sCPu1Z/ktToeR0fr7+7Vz504tW7YsdZ/X61VVVZW2b99+3sds375dS5cuPeO++fPn66WXXrrg79PX16e+vtNFivb29ksL/Cw/euOw/uN3NaY+Jy4s5Pdq2rgCfW7OeH1mehnjrwAMSTDLp2V/8wl9+T936ld7T+pXezNvb5nP69Ghx291OgwAAHCR/nbaGK3bdlgnWnu07WCTth1skl5zOqpLs+/b8xUO2FuWoAgCAEhb864YpTceulm7alu091ir9h5r0zsn2tTzoU9XYnhpampSLBZTaWnpGfeXlpbqwIED531MXV3dea+vq6u74O+zcuVKfetb37r0gC8gN5SlMfkhy57fSRfq7jj3U0f62GJE8hNMMcOQMdD1EYsbqfu9nsRzeD1Sls+rgmy/8gduRSMCunpsvmaML9SUMbny+5gCC+DSzb+qVP++YLp++16jTrT26ERLj+raexU3En/PjQhkKSeYlfqEY3LwQnygUzUelwzDUMwwPrJL1SpOdqAAAIDBK80LadtDN+nPDZ16q6ZZv685pbcON6ups9/p0DIKRRAAQForK8hWWUG2/nZamaTEfP/owJug0bihWDzx5qjX60mNmDj7TYWzx2Ol/yBIOG3ZsmVndI+0t7ervLzctOdfcvMVWnLzFaY9HwDAHh6PR3937Tj93bXjUvdFYnH1R+PK9vvk9VJkAAAA5vJ4PJpcmqvJpblaOLdC0un3RmJxQ5F4XPH4R78vcrbzjRG3S7bfvDHhF4siCAAgo3i9HgV4g2HYKi4uls/nU319/Rn319fXa/To0ed9zOjRowd1vSQFg0EFg8FLDxgA4Hp+n5duMwAAYKsPvzeSLfuLCpmGTA0AAGSMQCCgmTNnavPmzan74vG4Nm/erLlz5573MXPnzj3jekl67bXXLng9AAAAAABwDzpBAABARlm6dKkWLVqkWbNmafbs2Xr66afV1dWlxYsXS5LuvfdejR07VitXrpQk/eM//qNuvPFG/du//Ztuu+02bdiwQX/84x/1gx/8wMkvAwAAAAAA2IAiCAAAyCgLFixQY2Ojli9frrq6OlVWVmrTpk2p5ee1tbXyek83u15//fX6yU9+okcffVTf+MY3dMUVV+ill17S1Vdf7dSXAAAAAAAAbOIxjPRfD9ve3q78/Hy1tbUpLy/P6XAAAEgLvD7ah7MGAOBcvD7ah7MGAOBcF/v6yE4QAAAAAAAAAADgShRBAAAAAAAAAACAK1EEAQAAAAAAAAAArkQRBAAAAAAAAAAAuBJFEAAAAAAAAAAA4EoUQQAAAAAAAAAAgCtRBAEAAAAAAAAAAK5EEQQAAAAAAAAAALgSRRAAAAAAAAAAAOBKFEEAAAAAAAAAAIArUQQBAAAAAAAAAACuRBEEAAAAAAAAAAC4EkUQAAAAAAAAAADgShRBAAAAAAAAAACAK1EEAQAAAAAAAAAArkQRBAAAAAAAAAAAuBJFEAAAAAAAAAAA4EoUQQAAAAAAAAAAgCtlOR3AxTAMQ5LU3t7ucCQAAKSP5Oti8nUS1iEXAQDgXOQi9iEXAQDgXBebi2REEaSjo0OSVF5e7nAkAACkn46ODuXn5zsdhquRiwAAcGHkItYjFwEA4MI+LhfxGBnwkY14PK6TJ08qNzdXHo9nyM/T3t6u8vJyHTt2THl5eSZGODxxnubiPM3FeZqHszSXmedpGIY6OjpUVlYmr5cJl1YiF0lPnKe5OE9zcZ7m4SzNRS6SmchF0hPnaS7O01ycp3k4S3M5kYtkRCeI1+vVuHHjTHu+vLw8/sCaiPM0F+dpLs7TPJylucw6Tz51aQ9ykfTGeZqL8zQX52keztJc5CKZhVwkvXGe5uI8zcV5moezNJeduQgf1QAAAAAAAAAAAK5EEQQAAAAAAAAAALjSsCqCBINBVVdXKxgMOh2KK3Ce5uI8zcV5moezNBfnObzx/99cnKe5OE9zcZ7m4SzNxXkOb/z/NxfnaS7O01ycp3k4S3M5cZ4ZsRgdAAAAAAAAAABgsIZVJwgAAAAAAAAAABg+KIIAAAAAAAAAAABXoggCAAAAAAAAAABciSIIAAAAAAAAAABwJdcVQVavXq2KigqFQiHNmTNHO3bs+MjrX3jhBU2ZMkWhUEjXXHONNm7caFOkmWEw57l27VrNmzdPhYWFKiwsVFVV1cee/3Az2D+fSRs2bJDH49Gdd95pbYAZZLBn2draqgcffFBjxoxRMBjU5MmT+X7/kMGe59NPP60rr7xS2dnZKi8v19e+9jX19vbaFG16+93vfqfbb79dZWVl8ng8eumllz72MVu3btWMGTMUDAZ1+eWX67nnnrM8TliHXMRc5CLmIhcxD7mIuchFzEMuAnIRc5GLmItcxFzkI+YhFzFPWuYihots2LDBCAQCxrp164x3333XuP/++42CggKjvr7+vNe/8cYbhs/nM5544glj3759xqOPPmr4/X7jnXfesTny9DTY87znnnuM1atXG7t37zb2799v/MM//IORn59vHD9+3ObI09NgzzPp8OHDxtixY4158+YZd9xxhz3BprnBnmVfX58xa9Ys49ZbbzW2bdtmHD582Ni6dauxZ88emyNPT4M9z/Xr1xvBYNBYv369cfjwYePVV181xowZY3zta1+zOfL0tHHjRuORRx4xfvGLXxiSjBdffPEjr6+pqTHC4bCxdOlSY9++fcZ3v/tdw+fzGZs2bbInYJiKXMRc5CLmIhcxD7mIuchFzEUuMryRi5iLXMRc5CLmIh8xD7mIudIxF3FVEWT27NnGgw8+mPp1LBYzysrKjJUrV573+s9+9rPGbbfddsZ9c+bMMb70pS9ZGmemGOx5ni0ajRq5ubnGj3/8Y6tCzChDOc9oNGpcf/31xrPPPmssWrSIF/sBgz3LZ555xpg4caLR399vV4gZZbDn+eCDDxo333zzGfctXbrUuOGGGyyNMxNdzIv9P//zPxtXXXXVGfctWLDAmD9/voWRwSrkIuYiFzEXuYh5yEXMRS5iHXKR4YdcxFzkIuYiFzEX+Yh5yEWsky65iGvGYfX392vnzp2qqqpK3ef1elVVVaXt27ef9zHbt28/43pJmj9//gWvH06Gcp5n6+7uViQSUVFRkVVhZoyhnue3v/1tlZSU6L777rMjzIwwlLP85S9/qblz5+rBBx9UaWmprr76aj3++OOKxWJ2hZ22hnKe119/vXbu3JlqDa2pqdHGjRt166232hKz2/Ba5B7kIuYiFzEXuYh5yEXMRS7iPF6L3INcxFzkIuYiFzEX+Yh5yEWc
Z8drUZZpz+SwpqYmxWIxlZaWnnF/aWmpDhw4cN7H1NXVnff6uro6y+LMFEM5z7M99NBDKisrO+cP8XA0lPPctm2bfvjDH2rPnj02RJg5hnKWNTU1+s1vfqPPfe5z2rhxow4ePKgHHnhAkUhE1dXVdoSdtoZynvfcc4+ampr0qU99SoZhKBqN6stf/rK+8Y1v2BGy61zotai9vV09PT3Kzs52KDIMFrmIuchFzEUuYh5yEXORiziPXMQ9yEXMRS5iLnIRc5GPmIdcxHl25CKu6QRBelm1apU2bNigF198UaFQyOlwMk5HR4cWLlyotWvXqri42OlwMl48HldJSYl+8IMfaObMmVqwYIEeeeQRrVmzxunQMtLWrVv1+OOP6/vf/7527dqlX/ziF3rllVe0YsUKp0MDgBRykUtDLmIuchFzkYsAyATkIpeGXMR85CPmIRfJPK7pBCkuLpbP51N9ff0Z99fX12v06NHnfczo0aMHdf1wMpTzTHryySe1atUqvf7665o2bZqVYWaMwZ7noUOHdOTIEd1+++2p++LxuCQpKytL7733niZNmmRt0GlqKH82x4wZI7/fL5/Pl7rvE5/4hOrq6tTf369AIGBpzOlsKOf5zW9+UwsXLtQXvvAFSdI111yjrq4uffGLX9Qjjzwir5f6+mBc6LUoLy+PT15mGHIRc5GLmItcxDzkIuYiF3EeuYh7kIuYi1zEXOQi5iIfMQ+5iPPsyEVc838kEAho5syZ2rx5c+q+eDyuzZs3a+7cued9zNy5c8+4XpJee+21C14/nAzlPCXpiSee0IoVK7Rp0ybNmjXLjlAzwmDPc8qUKXrnnXe0Z8+e1O0zn/mMbrrpJu3Zs0fl5eV2hp9WhvJn84YbbtDBgwdTCZMkvf/++xozZsywfZFPGsp5dnd3n/OCnkyiEjuvMBi8FrkHuYi5yEXMRS5iHnIRc5GLOI/XIvcgFzEXuYi5yEXMRT5iHnIR59nyWmTaivU0sGHDBiMYDBrPPfecsW/fPuOLX/yiUVBQYNTV1RmGYRgLFy40Hn744dT1b7zxhpGVlWU8+eSTxv79+43q6mrD7/cb77zzjlNfQloZ7HmuWrXKCAQCxs9//nPjgw8+SN06Ojqc+hLSymDP82yLFi0y7rjjDpuiTW+DPcva2lojNzfXWLJkifHee+8ZL7/8slFSUmL8y7/8i1NfQloZ7HlWV1cbubm5xk9/+lOjpqbG+PWvf21MmjTJ+OxnP+vUl5BWOjo6jN27dxu7d+82JBlPPfWUsXv3buPo0aOGYRjGww8/bCxcuDB1fU1NjREOh42vf/3rxv79+43Vq1cbPp/P2LRpk1NfAi4BuYi5yEXMRS5iHnIRc5GLmItcZHgjFzEXuYi5yEXMRT5iHnIRc6VjLuKqIohhGMZ3v/tdY/z48UYgEDBmz55t/P73v0/9txtvvNFYtGjRGdf/7Gc/MyZPnmwEAgHjqquuMl555RWbI05vgznPCRMmGJLOuVVXV9sfeJoa7J/PD+PF/kyDPcs333zTmDNnjhEMBo2JEyca//qv/2pEo1Gbo05fgznPSCRiPPbYY8akSZOMUChklJeXGw888IDR0tJif+BpaMuWLef9uzB5hosWLTJuvPHGcx5TWVlpBAIBY+LEicaPfvQj2+OGechFzEUuYi5yEfOQi5iLXMQ85CIgFzEXuYi5yEXMRT5iHnIR86RjLuIxDHp0AAAAAAAAAACA+7hmJwgAAAAAAAAAAMCHUQQBAAAAAAAAAACuRBEEAAAAAAAAAAC4EkUQAAAAAAAAAADgShRBAAAAAAAAAACAK1EEAQAAAAAAAAAArkQRBAAAAAAAAAAAuBJFEAAAAAAAAAAA4EoUQQAAAAAAAAAAgCtRBAEAAAAAAAAAAK5EEQQAAAAAAAAAALgSRRAAAAAAAAAAAOBK/x/J2Mbw++E2/AAAAABJRU5ErkJggg==",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABkEAAAJtCAYAAACBs9diAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAACs+UlEQVR4nOz9eXychX3u/V/37Nr3zbJsecGLvAYDxiwhJAaHpCTpcspJekLqk9BfKf49NG5OEycsT5IWctqG5nlaGp+4cSFteqCHJpQGSkIdHAIYCDYGW/KCd1v7Ym0jadb7+WMW2bFsS5Zm7pl7Pu/XS68GWWN/RRNLmmu+38swTdMUAAAAAAAAAACAzTisHgAAAAAAAAAAACAVCEEAAAAAAAAAAIAtEYIAAAAAAAAAAABbIgQBAAAAAAAAAAC2RAgCAAAAAAAAAABsiRAEAAAAAAAAAADYEiEIAAAAAAAAAACwJUIQAAAAAAAAAABgSy6rB5iMaDSqtrY2FRUVyTAMq8cBACAjmKapoaEhzZo1Sw4Hr2tIJb4XAQDgQnwvkj58LwIAwIUm+71IVoQgbW1tamhosHoMAAAy0unTpzV79myrx7A1vhcBAODi+F4k9fheBACAi7vc9yJZEYIUFRVJin0yxcXFFk8DAEBmGBwcVENDQ/LrJFKH70UAALgQ34ukD9+LAABwocl+L5IVIUhi1bO4uJgv9gAA/BpOIqQe34sAAHBxfC+SenwvAgDAxV3uexGOdgIAAAAAAAAAAFsiBAEAAAAAAAAAALZECAIAAAAAAAAAAGyJEAQAAAAAAAAAANgSIQgAAAAAAAAAALAlQhAAAAAAAAAAAGBLhCAAAAAAAAAAAMCWCEEAAAAAAAAAAIAtEYIAAAAAAAAAAABbIgQBAAAAAAAAAAC2RAgCAAAAAAAAAABsiRAEAAAAAAAAAADYEiEIAAAAAAAAAACwJUIQAAAAAAAAAABgS4QgAAAAAAAAAADAlghBAAAAAAAAAACALRGCAAAAAAAAAAAAWyIEAQAAAAAAAAAAtkQIAgAAAAAAAAAAbGnKIcgrr7yiO++8U7NmzZJhGHr22Wcv+5idO3fq6quvltfr1cKFC/XEE09cwagAAAAAAADW4nkRAACyy5RDEL/fr1WrVunxxx+f1McfP35cH//4x3Xrrbdq7969+uM//mN94Qtf0E9/+tMpDwsAAAAAAGAlnhcBACC7uKb6gDvuuEN33HHHpD9+69atmjdvnr797W9LkpYuXapXX31Vf/3Xf60NGzZM9Y9HhjBNU6YpmfH/7DAMORyG1WMBAAAAsAnTNCVJhsHPGcgsPC+ChMRzI1GeFwGAjDblEGSqdu3apfXr15/3vg0bNuiP//iPL/qYQCCgQCCQ/OfBwcEZnemhf9uvf3n79Iz+ntnKUOwLdOLnisSX61i4IZk6P+yImlIkak74e7mdhrwup7wuh/K9TtWX5qmhLF9zyvM1pyJfNy2sVEWhN+WfEwAAAIDsYZqmXjvSq3947bgOdgwpEI5oNBjRaCiiikKvvvnJZfro8jqrxwSuWCY+L/K9V47qb3YcmdHfMyud81xIInBNPD9imvGQQ5JMKWKaikRNRZP/98LfzuNyKN/jVL7bqUKfSwuqCrW4tkhLaou0pLZYcyvyCXYBwAIpD0E6OjpUU1Nz3vtqamo0ODio0dFR5eXlXfCYRx99VF//+tdTNlMoYmosFE3Z75+rQhFToUhYwwGp1y+d7hvVG+pL/rrTYeimhZX65OpZun1ZrQq9Kf+vHwAAAIAMFYpE9ZP32vS9V47rQPvET/B2DwX0h/+0R3evm6uvfmypfG5nmqcEpi8TnxcJhqMaCoRT9vvnqmA4qmA4qn6FpAHpcOew/mN/R/LXl80q1n23LtSGZbVysjUCAGmTkc9Cb9myRZs3b07+8+DgoBoaGmbs9/8fGxbrjz60YMZ+P6tN9UUEpnmZf5Z5wYaIw2HIYUhOw5BhxP5z7NdjHxk1TQUjUQVCUQXCUQ2OhXTm7IhO943qdN+IWtoH1dw2qF8c7tYvDnfL69qnL9w8T5tvW8wXfgAAACAHtPWP6r0z/drXOqD9rYN670y/zo6EJEl5bqfuurZBv7GyTkU+t/LcTnndDv3Daye09RdH9YNdJ/X2ibP62898QPOrCi3+TIDUS/XzIv/t+rn6+MpZM/b7WW2qzyokngZJnN0b/+dzP8KQYYxviTgMyWEYcjpib0b8nx3nPC8yGoptso0EI+ofDen9ziEd7BjSwY5BHe4YVnPboP7oh3s0v6pAf/Shhfrk6llyO6dc1wsAmKKUhyC1tbXq7Ow8732dnZ0qLi6e8NUOkuT1euX1pu5sUnmBR+UFnpT9/oi5trH8vH8+1j2s595t07/tbdPxHr8ef/moDnUM6Tv/9QNshQAAAMASpmlqYDSkU30jausfVVm+R4tqilRm8c8L0aiprqGATvWNqHsooOI8lyoKvKoojP0sky1PmpmmqV8c7tb3Xz2uX77fc8GvVxV59fs3NOr31s5Raf6F/86/cscSXT+/XJv/5V21tA/qo//PL1Vb7FOh16VCn0tl+W59bl2jblhYmY5PB7gimfi8SGm+Z8L/zWFm3bKoKvmfz/qD+ofXT+iJ147rWLdfX/o/7+qf3jipH35hrQp4TgQAUirlf8uuW7dOL7zwwnnve+mll7Ru3bpU/9HIMPOrCvXH6xfp/o9cpX/b26Y//df39J8HuvQ7331d2+6+Rg3l+VaPCAAAgCwVicZegZu4326aUvfQmN451a+9p/v1zql+nej1y+NyKM/tVJ7bKafDUMfA2IQnYaqKvFpUU6hCr0v+QETDgbD8gbBGghEFI9HkyRNTpop9bhXnuVWS51aB1yXTNBUMRxWKRBWKmHI5DeW5nfK5nfK5HYpGFduiDkcUCEUViprSOXP7A2Gd6R9VMHzxE76GIbnir0Z2GobyPE4VeF0q8LhU6HWpOM+tqiJPMjipLvJpdlmeGsrzVZbvlmEYGgtFdKLXr6NdfrX1j6q2xKeF1YWaV1kw7bNT/kBYz+5t1T+8dkJHuoYlSQ5DWlpXrOWzSrR8dolW1Jeoqa5YHtelA50PLa7Wf9x/s+5/6h29caxPp/pGzvv1l1o69ZU7luiem+dzax8ZiedFIEllBR5tvm2R7rl5nn745in93ctHtPd0vzb98x5tu/saubIk3AaAbDTlEGR4eFhHjoyXZx0/flx79+5VeXm55syZoy1btqi1tVU/+MEPJEl/+Id/qL/927/Vn/7pn+q///f/rp///Of6l3/5Fz3//PMz91kgqxiGoU99oF5zK/L1B/+4Wwc7hvSpx1/T9+5eozVzyy//GwAAACCnRaKmnv7Vae05dVZnzo7ozNlRtQ+MKTJRS+2vGQlGYrfaf01VkVezSvPUOxzQmbOj6h4KqHsoMMHvcKGxUEBdk/zYqXA6DNWX5qm6yKuhsbB6/QH1+YOKZybxTr7Y5+wPRtQzHJzU71vgcaokz632wbELTuNKsbCioTxfcysK1BAPThr
K8lVd7FWxz60iXyxkKfA4zwsdTNPU7pNn9S9vn9ZP3mvXSDAiSSr0uvS71zRo442NV/zCp5pin/73PdfraLdfA6MhDQfCGh4La8eBTv3onVY98sJBNbcN6lu/tVJ5HnpDkFo8L4LpKPK59Ye3LNB188r1mW1v6OVD3Xrg2f169LdWEOQCQIoYpjnRt70Xt3PnTt16660XvP9zn/ucnnjiCf3+7/++Tpw4oZ07d573mC9+8YtqaWnR7Nmz9eCDD+r3f//3J/1nDg4OqqSkRAMDAyouLp7KuMhwbf2j+sKTb6ulfVBFPpde/tKHVFmYupVfALATvj6mD/+ugczyyAsH9L1Xjl3247wuh1bOLtHqhlJ9YE6ZFtcWyTRNjQajGg1FFIpEVV3k1eyy/POeOB8OhHWka1iHO4cUCEdV6HUmNyzyPE55XA55XQ55nLHHDI6FYm+jIQ2NheV2OuJvhtxOh4KRqMZCsa2PsXBEhmHIG/89vC6nXPHb8rHb84a8bocayvJVV+K74JXB0aipwbGQgpGoIlFTkaipcCS2BeMPhOMbKxH1jwbVOxxU73BAPf6gOgbGdObsiDoHzw9rin0uLaguVH1pntoHxnSka1gDoxeGRBNxOQyV5ntUlu9WWYFHPUMBHevxJ3+9sSJfn13XqN+9ZraKfO5J/Z5TZZqm/vGNk/rGv7coHDXVVFdMb0ga5erXR54XwUx5qaVT/79/fFtRU9p82yL9Xx+5yuqRACCrTPbr45RDECvwxd7eRoJh/Zetu9TcNqj/em2DvvXbK60eCQCyAl8f04d/10Dm+Oc3T+mrP94nSfqDD85XU12xZpflaXZZvkrzY0+0J8IEl8OQw8Gras81FoqotX9U/SMhza3IV0WB54Jtjp7hoI50Det034hOnx3Rqb4Rne4bUZ8/qMGxsIbGQskNlF+X53bq4yvr9LvXNOjaxrK0var5jWO9+qMf7lGfP7YNc928cv3Omtn62Io6+gdTiK+P6cO/a/v6xzdO6sFn90uS/uJ3Vup3r2mweCIAyB6EIMgqb5/o0+9s3SXDkP7tvhu1cnap1SMBQMbj62P68O8ayAyvHO7Wxid+pUjU5BWzFjJNU2OhqPpHgzrrD6l/JKi+kaAMGbplcZVloUNr/6i+9uN9+sXh7uSZrzy3U1+4eZ7+5PbFlsxkd3x9TB/+Xdvb/3zxoL6786g8Tod+8acfUl1JntUjAUBWmOzXR1qXkBGuaSzXp1bPkmlK//dzzcqCbA4AAABpdLhzSPf9cI8iUVO/+YF6/f8/vNDqkXKWES9iryvJU9OsYt2wsFK/sXKWPr7S2q2L+tI8PbHxOr325Q/rf2xYrPmVBRoNRfQ3Pz+isVDEsrkA4HL+x+2Ldc3cMgUjUf3TGyetHgcAbIcQBBnjK3csVb7HqT2n+vXs3larxwEAAECG6PMHtfEffqWhQFjXNZbrW79NeSwublZpnu67daF2/MktcsXPoSXOZAFAJnI4DH3+pnmSYmcfCW4BYGYRgiBj1Jb4tCn+ir5HXzio4UDY4okAAACQCX6w64Ra+0c1tyJf/+uza+R1OS//IOQ8wzBUUeiRJPUMBy7z0QBgrduaalRfmqezIyE9t7fN6nEAwFYIQZBRPn/TPM2tyFfXUEB/+/MjVo8DAACADPD8e+2SpP/rw1eprMBj8TTIJpWFXklS7zCbIAAym8vp0N3r5kqStr92nDPhADCDCEGQUbwupx78eJMk6fuvHlPn4JjFEwEAAMBKhzuH9H7XsDxOh9Y31Vg9DrJMIgTpZhMEQBa469oG+dwOHewY0hvH+qweBwBsgxAEGecjS6u1Zm6ZQhFTP36HbhAAAIBc9pP4FsjNV1WqJM9t8TTINolzWGyCAMgGpfke/dbVsyVJT7x+3OJpAMA+CEGQcQzD0O9eE/ui/8zuM6yAAgAA5CjTNPXCvlgI8vGVdRZPg2xUFd8EoRMEQLbYeEOjJOmllk6d7huxdhgAsAlCEGSkj62ok8/t0JGuYb17ZsDqcQAAAGCBw53DOsIpLEwDxegAss1VNUW6+apKRU3pB7tOWD0OANgCIQgyUpHPrTuWx17t98zu0xZPAwAAACs8H98C+eCiShX7OIWFqaMYHUA22nhjoyTpqV+dlj8QtnYYALABQhBkrN9ZEzuJ9dzeNo2FIhZPAwAAgHQyTVPPv9cmKbYlDFyJCs5hAchCH1pUrbkV+RoaC+vnB7usHgcAsh4hCDLWuvkVmlXi0+BYWP95oNPqcQAAAJBGhzuHdbTbzyksTEtl8hwWmyAAsofDYei2pbGvfa8c7rZ4GgDIfoQgyFgOh6HfXjNekA4AAIDckdgC4RQWpiNxDqvPH1Akalo8DQBM3i2LqyRJr7zfLdPk7y8AmA5CEGS03746FoK8crhbnYNjFk8DAACAdDBNUz+J94F8fCWnsHDlygtimyBRU+ofYRsEQPa4trFcPrdDnYMBHe4ctnocAMhqhCDIaI2VBbq2sUxRU/rxO61WjwMAAIA0ONQ5pGPdfnlcDq1fyiksXDm306Gy/NgmESexAGQTn9up6+dXSJJ+cZheEACYDkIQZLzfOeckFiugAAAA9vfCvg5J0gevqlIRp7AwTYly9F7K0QFkmQ9eFT+JdbjH4kkAILsRgiDjfWxFnXxuh450DevdMwNWjwMAAIAU23PyrCTp1iVVFk8CO0iUo3cTggDIMolekLeO92kkGLZ4GgDIXoQgyHhFPnfyDMJLLR0WTwMAAIBUMk1T+9tiL3xZWV9q7TCwhcQmCOewAGSb+ZUFqi/NUzAS1ZvH+qweBwCyFiEIskIiBNlxgDuYAAAAdnbm7Kj6R0JyOw0tqi20ehzYQBXnsABkKcMwktsgvzjcbfE0AJC9CEGQFW5ZVCWHIR3sGFJr/6jV4wAAACBF9rfGtkAW1RTJ63JaPA3soKIgdg6rhxAEQBYa7wUhBAGAK0UIgqxQVuDRmrllkqSfH2QbBAAAwK4Sp7BW1JdYPAnsorIosQnCOSwA2eeGhRVyOQwd6/HrdN+I1eMAQFYiBEHW+PCS2Emsnx/otHgSAAAApMq+1kFJ0nJCEMwQNkEAZLNin1tXz4m9KJSTWABwZQhBkDU+vKRakvTa0V6NBMMWTwMAAICZZpqmmuPnsAhBMFMSmyAUowPIVh9cVCmJk1gAcKUIQZA1FtUUqr40T8FwVK8f6bV6HAAAAMyw9oEx9fqDcjoMLaktsnoc2ESiGL1nOCDTNC2eBgCm7pZFsReFvn60V6FI1OJpACD7EIIgaxiGoY8sjX3h//khekEAAADsZl98C+Sq6kL53JSiY2ZUFMbOYQXCUfmDEYunAYCpWzarWBUFHg0Hwtpz8qzV4wBA1iEEQVZJnMT6+YEuXsUFAABgM4lTWJSiYyble1zK98RCtZ4hekEAZB+Hw9BNV8VOYr1+lMsYADBVhCDIKtfPr1Ce26mOwTG1tA9aPQ4AAABm0D76QJAiiW2QXj8hCIDsdM3cWDn6nlNsggDAVBGCIKv43M7kqx9+foCTWAAAAHayvy32IhdCEMy0yn
gvSPcQ5egAstMH5sRCkL2n+xWNchkDAKaCEARZ5yPxk1g7DhKCAAAA2EXn4Ji6hwJyGFJTXbHV48BmKgrGy9EBIBstqS1SvsepobGwjnQPWz0OAGQVQhBknVvjIci7Z/rVzU1fAAAAW9gfP4W1sLpQeR5K0TGzqori57CG2QQBkJ1cTodWzo5tSlKODgBTQwiCrFNT7NPy+mKZprTzENsgAAAAdkAfCFKJTRAAdnD1HHpBAOBKEIIgK926OLYN8sv3eyyeBAAAADMhsQmyfBYhCGZeJcXoAGxgPATpt3YQAMgyhCDISjctjJWjv360R6ZJIRgAAEC2298aK0VfMZsQBDOvsii+CUIxOoAs9oE5pZKkI13DGhgJWTsMAGQRQhBkpQ/MKVOe26me4aAOdQ5ZPQ4AAACmoXsooI7BMRmUoiNFkuew2AQBkMUqCr1qrMiXJL1zmpNYADBZhCDISh6XQ9fNK5ckvcpJLAAAgKy2vy12Cmt+ZYEKvC6Lp4EdJYrRe4YIQQBkN05iAcDUEYIga924sEKS9PrRXosnAQAAwHTsPxMLQVZQio4USWyCDI6FFQxHLZ4GAK5c4iTWO5SjA8CkEYIga90Y7wV581ivQhF+kAEAAMhWiU2Q5YQgSJGSPLdcDkMS5egAstsH4psge0/1KxqlIxUAJoMQBFlraW2xygs88gcjevd0v9XjAAAA4Ao1t8VK0ZfNIgRBajgchsoLEiexKEcHkL2W1BYp3+PUUCCs97uGrR4HALICIQiylsNhaN2C2EmsV4/QCwIAAJCNBkZDOnN2VBKl6EitykLK0QFkP5fToZWzYy8a4CQWAEwOIQiy2o0LYiexXiMEAQAAyEoH22NbILNKfCrJd1s8DeysopBydAD2MF6OTggCAJNBCIKsdlO8F+SdU/3yB8IWTwMAAICpOhAPQZpmsQWC1KqKb4L0+jmHBSC7jYcg/dYOAgBZghAEWW1ORb5ml+UpHDX11vE+q8cBAADAFB1oH5IkLeUUFlKssih+DotNEABZ7gNzSiVJR7qGNTASsnYYAMgChCDIeoltEE5iAQAAZJ8DHbFNEEIQpFpFvBidTRAA2a6i0KvGinxJ0junOYkFAJdDCIKsd0M8BKEcHQAAILuEI1Ed6mATBOmRLEYfZhMEQPbjJBYATB4hCLLeDQsqJEkHO4b4gQYAACCLnOj1KxCOKt/j1NzyfKvHgc0li9GH2QQBkP0SJ7HePd1v6RwAkA0IQZD1Kgu9WlJbJEl6/WivxdMAAABgslrifSBLaovkcBgWTwO7YxMEgJ0sry+RJO1vHZBpmhZPAwCZjRAEtpDsBXmfk1gAAADZoqWNPhCkTyIE6fMHFY3yhCGA7La0rlguh6Fef1BtA2NWjwMAGY0QBLZww8LYSaw3jrMJAgAAkC0OtBOCIH3K48Xokaip/tGQxdMAwPT43E5dVRO7irHvzIDF0wBAZiMEgS1c21guhyGd7B1RW/+o1eMAAABgEghBkE4el0MleW5JUi8nsQDYwIr62NfP/a2EIABwKYQgsIUin1sr4vcw32QbBAAAIOP1DgfUNRSQYSjZ7wakWoHHKUkaCUYsngQApi/xPMg+QhAAuCRCENjG9fNjJ7F2UY4OAACQ8Q7ES9EbKwpU4HVZPA1yhccV+xE4FIlaPAkATN+K2aWSYiEI5egAcHGEILCN6xfEe0GO9Vk8CQAAAC5n/BQWWyBIH7cz9iNwkBAEgA0sqS2Sy2Goj3J0ALgkQhDYxjVzy+R0GDrVN6JWekEAAAAyWksiBKmlDwTpkwxBwoQgALIf5egAMDmEILCNIp9byxO9IMc4iQUAAJDJKEWHFcbPYXE2BoA9UI4OAJdHCAJbuX5+uSTpDUIQAACAjBUIR3Ska1iStHQWIQjSx+OkEwSAvSR6Qd4jBAGAiyIEga2sS5SjE4IAAABkrCNdwwpHTZXkuTWrxGf1OMghiU0QzmEBsIsV8YsY+ylHB4CLIgSBrVzTWC6nw9DpvlGdOTti9TgAAACYwIH2IUmxUnTDMCyeBrnE7Yz9941idAB2QTk6AFweIQhspdDrSr4K4s1jfRZPAwAAgInQBwKruDmHBcBmKEcHgMsjBIHtrFsQO4lFLwgAAEBmIgSBVTiHBcCOVsZfDLqvtd/aQQAgQxGCwHaupxcEAAAgY5mmmQxBmghBkGYUowOwo+WzEyHIoMWTAEBmIgSB7Vwzt0xOh6EzZ0d1uo9eEAAAgEzSNRTQ2ZGQnA5DC6sLrR4HOWb8HBblwQDsg3J0ALg0QhDYToHXpZXxV0G8eZxeEAAAgEyS2AKZV1kgn9tp8TTINYlzWAHOYQGwEcrRAeDSCEFgS+vm0wsCAACQiQ51DEmSFtcWWTwJchHF6ADsyOd2alGyHL3f2mEAIAMRgsCWkr0gRwlBAAAAMkkiBFlKCAILuF2GJCnEJggAm1mRLEcfsHgSAMg8hCCwpTXxXpDW/lGdOUsvCAAAQKY4mNwEoRQd6eeNb4IE2QQBYDOUowPAxRGCwJbO6wU5Ri8IANjN448/rsbGRvl8Pq1du1ZvvfXWJT/+O9/5jhYvXqy8vDw1NDToi1/8osbGuJcMpFsoEtWRrmFJsfvlQLpxDguAXSU2QZopRweACxCCwLbWzoudxHrzOCexAMBOnn76aW3evFkPP/yw9uzZo1WrVmnDhg3q6uqa8OP/+Z//WV/5ylf08MMP68CBA/r+97+vp59+Wl/96lfTPDmAEz1+BSNRFXicqi/Ns3oc5CB3vBg9GOYJQgD2sqS2SE6HoV5/UB2DvNgHAM5FCALbWju/XJL0BpsgAGArjz32mO655x5t3LhRTU1N2rp1q/Lz87V9+/YJP/7111/XjTfeqM985jNqbGzU7bffrk9/+tOX3R4BMPMOnlOK7nAYFk+DXOThHBYAm/K5nVpYVShJauYkFgCchxAEtnXN3DI5DOlU34jaB0atHgcAMAOCwaB2796t9evXJ9/ncDi0fv167dq1a8LH3HDDDdq9e3cy9Dh27JheeOEFfexjH7vonxMIBDQ4OHjeG4DpO9gR+98SfSCwSmIThGJ0AHa0bFbs62tzG9+7AsC5CEFgW0U+t5bX0wsCAHbS09OjSCSimpqa895fU1Ojjo6OCR/zmc98Rt/4xjd00003ye12a8GCBfrQhz50yXNYjz76qEpKSpJvDQ0NM/p5ALnqUHwThD4QWMXjjG0g0QkCwI6akiHIgMWTAEBmIQSBra2dFzuJRS8IAOSunTt36pFHHtHf/d3fac+ePfrRj36k559/Xt/85jcv+pgtW7ZoYGAg+Xb69Ok0TgzY10FCEFjM4+IcFgD7SrwQlE0QADify+oBgFS6fn6Ftv3yOJsgAGATlZWVcjqd6uzsPO/9nZ2dqq2tnfAxDz74oD772c/qC1/4giRpxYoV8vv9+oM/+AN97Wtfk8Nx4WtCvF6vvF7vzH8CQA4bGgvpzNnYidIlnMOCRdyJThDOYQGwocQmSGv/qM76gyor8Fg8EQBkBjZBYGvXNJbLMKRjPX51DY5ZPQ4AYJo8Ho/Wr
FmjHTt2JN8XjUa1Y8cOrVu3bsLHjIyMXBB0OJ1OSZJpmqkbFsB5DnfGtkBqi30qyXdbPA1yVSIE4RwWADsq9rk1pzxfktTSzjYIACQQgsDWSvLcaqqLvRLijeNsgwCAHWzevFnbtm3Tk08+qQMHDujee++V3+/Xxo0bJUl33323tmzZkvz4O++8U9/97nf11FNP6fjx43rppZf04IMP6s4770yGIQBSL3EKazGnsGAhzmEBsLvl9fSCAMCv4xwWbG/tvAo1tw3qzWO9+sSqWVaPAwCYprvuukvd3d166KGH1NHRodWrV+vFF19MlqWfOnXqvM2PBx54QIZh6IEHHlBra6uqqqp055136s///M+t+hSAnHSwPd4HUkcIAut4EpsgYTYBAdjTslklemFfh/a3sgkCAAmEILC9tfPLtf2143qTTRAAsI1NmzZp06ZNE/7azp07z/tnl8ulhx9+WA8//HAaJgNwMYcoRUcG4BwWALtL9IKwCQIA4ziHBdu7rrFcknSka1jdQwGLpwEAAMg9pmnqYEfsFamLayhFh3US57ACFKMDsKnls0okxbpRR4Jhi6cBgMxACALbKyvwJF9x+BbbIAAAAGnXPjCmwbGwXA5DC6oLrB4HOcztNCSxCQLAvqqKvKou8so0pQOUowOAJEIQ5Ijr51dIkt483mvxJAAAALkncQprflWBvC6nxdMgl3k4hwUgByxLnsQiBAEAiRAEOWLtvNhJrDePsQkCAACQbgfjIcjiWk5hwVqJc1hBzmEBsLHl9bGTWM2UowOAJEIQ5Ijr4iHIoc4h9fmDFk8DAACQWxJ9IJSiw2rjxeimxZMAQOokN0HaKUcHAIkQBDmiotCrq6oLJUlvcRILAAAgrRLnsAhBYLVECBKMRGWaBCEA7GlZvBz9UMcQm28AIEIQ5JBEL8gbnMQCAABIm2A4qqPdw5KkxYQgsFjiHJbENggA+5pdlqdin0uhiKn3u4asHgcALEcIgpyxdn68F+Q4IQgAAEC6HOsZVihiqsjrUn1pntXjIMclitElytEB2JdhGMltEMrRAYAQBDkk0QtysGNQ/SP0ggAAAKTDgfZ4H0hdkQzDsHga5Dq3c/y/g4QgAOws0QvSQggCAIQgyB3VRT4tqCqQaUpvsQ0CAACQFgfbE30gxRZPAkgup0OOeA7CnXwAdrasPvZ1d38r5egAQAiCnLI23gvCSSwAAID0aIlvgiytIwRBZji3HB0A7Gp5/BxWS/ugolE6kADkNkIQ5JS18ZNYbxzrtXgSAACA3HAgvgmytI5SdGSGRC8IxegA7Gx+VaF8bodGghGd6PVbPQ4AWIoQBDnl+vgmSEv7oAZGQxZPAwAAYG/dQwH1DAdkGNLiWkIQZAaPK74JwjksADbmdBjJU5SUowPIdYQgyCk1xT7Nq4z1grx9gpNYAAAAqXSwI/akS2NFgfI9LounAWLcyU0QQhAA9pYoRycEAZDrCEGQcziJBQAAkB4Hkn0gbIEgc7hdsWZ0OkEA2N2yeC9Icxvl6AByGyEIcs71lKMDAACkRbIPpJZSdGSORCcI57AA2N25myCmSQ8SgNxFCIKcs3Z+bBNkf+uAhsboBQEAAEiV8U0QQhBkDs5hAcgVi2uL5HQY6vMH1TE4ZvU4AGAZQhDknLqSPM0pz1fUlN4+cdbqcQAAAGwpGI7qSNewJGkJ57CQQRLF6IQgAOzO53bqqupCSVJzK70gAHIXIQhyUrIX5Di9IAAAAKlwpGtY4aipYp9L9aV5Vo8DJHEOC0AuaaIcHQAIQZCbkr0gx+gFAQAASIXEKawldcUyDMPiaYBxiXNYwQj38QHYH+XoAEAIghyV6AXZ1zqg4UDY4mkAAADsJ9kHUsspLGQWd+IcFpsgAHLAMjZBAIAQBLlpdlm+ZpflKRI19fYJtkEAAABm2sGOIUmUoiPzJM9h0QkCIAckzmG19o/qrD9o8TQAYA1CEOSs5Ems44QgAAAAM8k0zfFNEEIQZBiPK3aejWJ0ALmg2OfWnPJ8SVJLO9sgAHITIQhyViIEeeMY5egAAAAzqXsooF5/UA5DWlTDOSxkFjfF6AByzPhJLHpBAOQmQhDkrLXzYr0g750ZkJ9eEAAAgBmTeKVpY2WB8jxOi6cBzsc5LAC5hl4QALmOEAQ5q6H8nF6Qk2etHgcAAMA26ANBJhsvRjctngQA0mNZfYkkQhAAuYsQBDmNk1gAAAAzL9EH0kQIggyU2AShEwRArkhsghztHtZIkEsYAHIPIQhyGiEIAADAzBsvRacPBJnH4+IcFoDcUl3kU1WRV6YpHWgfsnocAEg7QhDkNHpBAAAAZtZYKKKj3X5J0pJaNkGQedxOQxLF6AByS2IbpIVydAA5iBAEOY1eEAAAgJl1pGtYkaipkjy36kp8Vo8DXMDNOSzMgMcff1yNjY3y+Xxau3at3nrrrUt+/He+8x0tXrxYeXl5amho0Be/+EWNjY2laVqAcnQAuY0QBDmPk1gAAAAz59xTWIZhWDwNcKHkOSw2QXCFnn76aW3evFkPP/yw9uzZo1WrVmnDhg3q6uqa8OP/+Z//WV/5ylf08MMP68CBA/r+97+vp59+Wl/96lfTPDly2bJZlKMDyF2EIMh5hCAAAAAzpyUZgnAKC5mJYnRM12OPPaZ77rlHGzduVFNTk7Zu3ar8/Hxt3759wo9//fXXdeONN+ozn/mMGhsbdfvtt+vTn/70ZbdHgJmU2AQ51DHE338Acg4hCHIevSAAAAAzJ7EJ0kQIggw1fg7LtHgSZKNgMKjdu3dr/fr1yfc5HA6tX79eu3btmvAxN9xwg3bv3p0MPY4dO6YXXnhBH/vYx9IyMyBJc8rzVeR1KRiJ6v3OYavHAYC0uqIQhNuXsBN6QQAAAGaGaZpqaWMTBJktcQ4rwDksXIGenh5FIhHV1NSc9/6amhp1dHRM+JjPfOYz+sY3vqGbbrpJbrdbCxYs0Ic+9KFLnsMKBAIaHBw87w2YDsMw1JTsBaEcHUBumXIIwu1L2BEnsQAAAKavbWBMg2NhuRyGrqoptHocYEIUoyPddu7cqUceeUR/93d/pz179uhHP/qRnn/+eX3zm9+86GMeffRRlZSUJN8aGhrSODHsank9vSAActOUQxBuX8KOCEEAAACmL7EFsrC6UF6X0+JpgIm5nYYkQhBcmcrKSjmdTnV2dp73/s7OTtXW1k74mAcffFCf/exn9YUvfEErVqzQb/7mb+qRRx7Ro48+qmh04v8ebtmyRQMDA8m306dPz/jngtyzvD62CbK/lU0QALllSiFIum5fsvaJdKMXBAAAYProA0E28MbPYQU5h4Ur4PF4tGbNGu3YsSP5vmg0qh07dmjdunUTPmZkZEQOx/lPvzidsaDYNCfupvF6vSouLj7vDZiu5bNimyAt7YOKROlFApA7phSCpOv2JWufSDd6QQAAAKYvsQmSuDkOZCLOYWG6Nm/erG3btunJJ5/UgQMHdO+998rv92vjxo2SpLvvvltbtmxJfvydd96p7373u3rqqad0/PhxvfTSS3rwwQd15513
JsMQIB3mVxXK53ZoJBjR8R6/1eMAQNpcUTH6VFzJ7UvWPmGFxEmsXUc5iQUAAHAlDnRQio7MlyhGD0Z4FTSuzF133aW/+qu/0kMPPaTVq1dr7969evHFF5MvGD116pTa29uTH//AAw/oT/7kT/TAAw+oqalJn//857Vhwwb9r//1v6z6FJCjnA4jua1JOTqAXOKaygdP9/alJK1YsUJ+v19/8Ad/oK997WsXrIRKsbVPr9c7ldGAaVs3v0LP7D6jXfSCAAAATNnQWEgne0ckEYIgsyU2QYLhiMWTIJtt2rRJmzZtmvDXdu7ced4/u1wuPfzww3r44YfTMBlwacvrS7TnVL/2tw7ok6vrrR4HANJiSpsg6bp9CVhh3YLYJsi+M/0aHAtZPA0AAEB2OdQxJEmqLfapvMBj8TTAxY2fw+LnUQC5J9EL0txG/y6A3DHlc1jcvoRdzSrNU2NFvqKm9KvjfVaPAwAAkFVa2ukDQXZIFKPTCQIgFyW+Tu9vHeDFyQByxpTOYUmx25fd3d166KGH1NHRodWrV19w+/LczY8HHnhAhmHogQceUGtrq6qqqnTnnXfqz//8z2fuswBmyLoFFTrRO6JdR3v1kaU1Vo8DAACQNQ60J/pAiiyeBLi08XNYhCAAcs+imiK5nYYGx8I6c3ZUDeX5Vo8EACk35RBE4vYl7Ov6+RX632+dphcEAABgilriZzWa6kosngS4NLfTkCQF2QQBkIM8LocW1xZpf+ug9rcOEIIAyAlTPocF2Nm6+bFekJb2QfWPBC2eBgAAIDuEI1EdjHeCcA4Lmc7DOSwAOS7RC7K/bcDiSQAgPQhBgHNUF/u0sLpQpim9cYxeEAAAgMk40etXIBxVvsepubyiFBnOwzksADluWX08BGmlHB1AbiAEAX5NYhvkDU5iAQAATEpz/BTWktoiORyGxdMAl5boBImaUiRKKTCA3LOccnQAOYYQBPg16xbEQpBdRwlBAAAAJuNAe+wU1tI6TmEh8yXOYUmcxAKQm5bWFcvpMNTrD6pzMGD1OACQcoQgwK+5Pr4JcqhzSD3DfDMAAABwOS3t8VJ0+kCQBRKbIJIU4CQWgBzkczu1sKpQUmwbBADsjhAE+DXlBR4tqS2SxEksAACAyTgQD0HYBEE2cDvHT7axCQIgVy1LnMSiHB1ADiAEASbASSwAAIDJ6R4KqHsoIMNQ8oUkQCYzDCNZjk4IAiBXJcrRE71eAGBnhCDABBLl6LvYBAEAALikxBbIvIoC5XtcFk8DTE5iGyTIOSwAOSpRjt7MOSwAOYAQBJjA2vkVchjSsW6/OgfHrB4HAAAgYyX6QJbSB4Is4naxCQIgtyV6vNoGxtRLHyoAmyMEASZQkufWslmx1VB6QQAAAC4usQnSRB8IskjiHFYwbFo8CQBYo8jn1rzKAknSfk5iAbA5QhDgIhK9IK8fIQQBAAC4mMQt8SY2QZBF3IkQhE0QADksWY7OSSwANkcIAlxEIgR57WiPxZMAAABkptFgRMe6hyVJy9gEQRbxcA4LALQiXo5OCALA7ghBgIu4rrFcLoehM2dHdap3xOpxAAAAMs6hziFFTamy0KOqIq/V4wCTljiHFaIYHUAOS4Qg+whBANgcIQhwEQVelz4wp1QS2yAAAAATaYmfwlpaVyzDMCyeBpg8tyv239cAmyAActiyeAhy5uyozvqDFk8DAKlDCAJcwg0LKiVJrx0hBAEAAPh1Le2xV47SB4Js42YTBABUkufW3Ip8SWyDALA3QhDgEm5cGAtBdh3tVTRqWjwNAABAZklsgiybVWLxJMDUJM9hRfgeH0BuW85JLAA5gBAEuITVDaXKczvV6w/qUOeQ1eMAAABkjEjU1MGO2PdHTZSiI8skitGDkYjFkwCAtVZSjg4gBxCCAJfgcTl03bxySZzEAgAAONfJXr9GghH53A7NqyywehxgSsbPYbEJAiC3UY4OIBcQggCXccOCCknS60d7LZ4EAAAgc7S0x05hLaktltNBKTqyS+IcFsXoAHId5egAcgEhCHAZiV6QN4/1KsQPSQAAAJKk5ngfCKXoyEZuF8XoACCdX46+v41tEAD2RAgCXEZTXbFK893yByN670y/1eMAAABkhEQpOn0gyEZuZ2x7iRc5AQDl6ADsjxAEuAyHw9C6+bGTWK8d4SQWAACANH4Oi00QZCNvohidTRAASPaCUI4OwK4IQYBJuCF+EotydAAAAKlraEzdQwEZhrSktsjqcYApSxajswkCAMkQ5L0zhCAA7IkQBJiEG+Pl6O+c6tdoMGLxNAAAANY60D4kSZpfWaB8j8viaYCpS4QgwYhp8SQAYL3lsyhHB2BvhCDAJMyrLFBdiU/BSFS/OtFn9TgAAACWSvaBxJ80AbKNh3NYAJBUkk85OgB7IwQBJsEwDN2wIH4S6ygnsQAAQG5L9oFQio4sxTksADgf5egA7IwQBJikGxfGTmK9Tjk6AADIcS3xV4lSio5s5XEakghBACCBcnQAdkYIAkzSjfFy9P1tA9zIBAAAOWskGNaxHr8kNkGQvTiHBQDnW8EmCAAbIwQBJqmm2KdFNYUyTU5iAQCA3HWwY0imKVUVeVVV5LV6HOCKjBejE4IAgDRejn66b1T9I7zwE4C9EIIAU3DTwipJ0qvvE4IAAIDclCxFZwsEWYxOEAA4X0m+W3PK4+XorYMWTwMAM4sQBJiCm6+KncT65fs9Mk3T4mkAAADSL1mKTh8IshjnsADgQitmx7ZB3mvtt3YQAJhhhCDAFKydXy6301Br/6hO9I5YPQ4AAEDasQkCO/AkN0F4YRMAJCR7Qc7QCwLAXghBgCnI97h09ZwySdKr73dbPA0AAEB6RaKmDnbEQpBlbIIgi9EJAgAXWpnYBCEEAWAzhCDAFJ17EgsAACCXHO/xaywUVb7HqbkVBVaPA1wxzmEBwIVW1JfIMKTW/lH1DAesHgcAZgwhCDBFN10VK0ffdbRXYV45BgAAckiiD2RJbZGcDsPiaYAr53bG/vtLMToAjCvyuTW/MvYiB05iAbATQhBgilbUl6gkz62hQFjv8k0BAADIIc1tse99KEVHthvvBCEEAYBzrZxdKkl690y/pXMAwEwiBAGmyOkwdMOCCknSq5zEAgAAOWS8FL3E4kmA6eEcFgBMjF4QAHZECAJcgZvivSCvHqEcHQAA5AbTNMdDEDZBkOXcyU0Q0+JJACCzJDZB3jvTL9Pk70gA9kAIAlyBmxfGekHeOdWv4UDY4mkAAABSr3sooF5/UA5DWlxTZPU4wLQkQpAg57AA4DzLZhXL5TDUMxxU28CY1eMAwIwgBAGuwJyKfM0pz1c4auqNo71WjwMAAJByzfFS9AVVhcrzOC2eBpgezmEBwMR8bqcWxV/ssI9eEAA2QQgCXKHxk1j0ggAAAPvjFBbshGJ0ALi4VQ2xXpB36QUBYBOEIMAVunlhLAT55fv0ggAAAPtraU+UohOCIPu5XYYkQhAAmMiK+lJJsV4QALADQhDgCt2woFIOQzra7Vdr/6jV4wAAAKQUmyCwE885xejRKMW
/AHCulbNjmyDvnRng70gAtkAIAlyhkny3VjeUSpJeOcw2CAAAsK/hQFgnev2SpKVsgsAG3K7xH4VDUbZBAOBci2uL5HU5NDQ2/vUfALIZIQgwDR9cVCWJEAQAANjboY5BmaZUU+xVZaHX6nGAaUtsgkixbRAAwDi305Hc/NzXSi8IgOxHCAJMwy3xEOTVIz0Kc08YAADYVPIUFlsgsAn3OSFIMMz38QDw61bNLpUkvXuaEARA9iMEAaZh5exSlea7NTQW1t7T/VaPAwAAkBLJUnT6QGATTochp4NydAC4mPFekH5rBwGAGUAIAkyD02HoxoWVkjiJBQAA7CuxCbJsVonFkwAzx+2MhSBsggDAhRIhyP62AS5fAMh6hCDANCVOYv2CEAQAANhQOBLVwY4hSZzDgr0kekGCPLkHABeYX1moQq9LY6Go3u8atnocAJgWQhBgmj54VSwEea91QH3+oMXTAAAAzKzjPX4FwlEVeJyaU55v9TjAjPG4Yj8Ocw4LAC7kcBhaXh978QMnsQBkO0IQYJpqS3xaUlsk04wVpAMAANhJc/wU1tK6YjniHQqAHSQ2QUJh0+JJACAzJcrR3ztDOTqA7EYIAsyADyZOYh3iJBYAALAXStFhV25X4hxWxOJJACAzrYyHIO+yCQIgyxGCADMg0QvyyvvdMk1eSQYAAOwjUYpOHwjsxp3oBGETBAAmtKohVo5+sH1IYyECYwDZixAEmAHXNJYpz+1U91BAB9qHrB4HAABgRpimmdwEWUoIAptJnsOiEwQAJlRfmqfKQq/CUVP7WzmJBSB7EYIAM8DrcmrdggpJsW0QAAAAO+gYHFOfPyinw9Di2iKrxwFmVPIcVpgQBAAmYhiGVjeUSpL2nu63dBYAmA5CEGCGfPCqSknSK4cJQQAAgD0kTmEtrCqUz+20eBpgZnmchiQ2QQDgUj4wp1SS9A4hCIAsRggCzJBEOfqvTvTJHwhbPA0AAMD0JftAKEWHDXmSxeiEIABwMclNkFP9ls4BANNBCALMkHmVBWooz1MoYmrX0V6rxwEAW3v88cfV2Ngon8+ntWvX6q233rrkx/f39+u+++5TXV2dvF6vFi1apBdeeCFN0wLZqzkegiwjBIENjRejE4IAwMWsnF0iw5Ba+0fVPRSwehwAuCKEIMAMMQxDty6uliS9fKjL4mkAwL6efvppbd68WQ8//LD27NmjVatWacOGDerqmvjv3mAwqNtuu00nTpzQM888o0OHDmnbtm2qr69P8+RA9kmUojdRig4bcieL0U2LJwGAzFXkc2thVaEkekEAZC9CEGAGJUKQnYe6ZZr8MAUAqfDYY4/pnnvu0caNG9XU1KStW7cqPz9f27dvn/Djt2/frr6+Pj377LO68cYb1djYqFtuuUWrVq1K8+RAdhkcC+lU34gkzmHBnhLnsOgEAYBLGy9HP2vtIABwhQhBgBl0/fwKeV0OtfaP6v2uYavHAQDbCQaD2r17t9avX598n8Ph0Pr167Vr164JH/Pcc89p3bp1uu+++1RTU6Ply5frkUceUSQSueifEwgENDg4eN4bkGsOxE9h1ZfmqTTfY/E0wMzzcA4LACZldbwcnU0QANmKEASYQXkep9YtqJAkvXyQk1gAMNN6enoUiURUU1Nz3vtramrU0dEx4WOOHTumZ555RpFIRC+88IIefPBBffvb39af/dmfXfTPefTRR1VSUpJ8a2homNHPA8gGiVNYSzmFBZtyOw1JFKMDwOUkNkHeOz2gaJSrFwCyDyEIMMM+tKhKEr0gAJApotGoqqur9b3vfU9r1qzRXXfdpa997WvaunXrRR+zZcsWDQwMJN9Onz6dxomBzNAS3wThFBbsinNYADA5i2uKlOd2aigQ1tFurl4AyD6EIMAM+1C8F+TtE2c1OBayeBoAsJfKyko5nU51dnae9/7Ozk7V1tZO+Ji6ujotWrRITqcz+b6lS5eqo6NDwWBwwsd4vV4VFxef9wbkmuZ4CLKMEAQ25eYcFgBMisvp0Ir6EknSO5zEApCFCEGAGdZYWaD5lQUKR0299n6P1eMAgK14PB6tWbNGO3bsSL4vGo1qx44dWrdu3YSPufHGG3XkyBFFo+NPch0+fFh1dXXyeOg5ACYSDEf1fteQJKmJc1iwqUQnCJsgAHB59IIAyGaEIEAKJLZBdh7qtngSALCfzZs3a9u2bXryySd14MAB3XvvvfL7/dq4caMk6e6779aWLVuSH3/vvfeqr69P999/vw4fPqznn39ejzzyiO677z6rPgUg473fNaRQxFSxz6XZZXlWjwOkxPg5LO7bA8DlJHpB3iUEAZCFXFYPANjRrUuqtP2143r5UJdM05RhGFaPBAC2cdddd6m7u1sPPfSQOjo6tHr1ar344ovJsvRTp07J4Rh/nUdDQ4N++tOf6otf/KJWrlyp+vp63X///fryl79s1acAZLxz+0D4PgZ2lTiHFeAcFgBcViIEOdgxpNFgRHke56UfAAAZhBAESIHr5pUrz+1U11BALe2DWjarxOqRAMBWNm3apE2bNk34azt37rzgfevWrdMbb7yR4qkA+2hpj4cgdXwPA/tycw4LACatrsSn6iKvuoYC2t82oGsby60eCQAmjXNYQAp4XU7duLBSEiexAABA9qEUHblg/BwWIQgAXI5hGMltkL2n+i2dBQCmihAESJFbl1RJkl4+2GXxJAAAAJNnmqYOnHMOC7ArjzN26i3IOSwAmBTK0QFkK0IQIEUS5eh7Tp1V/0jQ4mkAAAAm58zZUQ0FwvI4HVpYXWj1OEDKcA4LAKYmsQnyzqmz1g4CAFNECAKkSH1pnhbVFCpqSr84zEksAACQHZrbBiRJi2oLk08SA3aUOIcVjJgWTwIA2WHV7FI5DKltYEwdA2NWjwMAk8ZPNUAK3boktg3CSSwAAJAtWhKnsOo4hQV7S4R8wXDE4kkAIDsUeF1aUhv7/mAP2yAAsgghCJBC65fWSJJ2Hu5WmDV7AACQBZoJQZAjxs9hsQkCAJN19dxSSdKek4QgALIHIQiQQh9oKFVpvlv9IyHtOdVv9TgAAACX1dIeC0GW1ZdYPAmQWl4XnSAAMFVr5pZJknazCQIgixCCACnkcjr0oUVVkqQdBzstngYAAODS+vxBtcdvfC+pLbJ4GiC1xs9hEYIAwGRdPScWgjS3DmosxDlBANmBEARIsQ/HT2L9/AC9IAAAILMl+kAaK/JV5HNbPA2QWm6nIUkKsgkCAJM2pzxflYUeBSNRNbcNWD0OAEwKIQiQYrdcVSWnw9D7XcM61Tti9TgAAAAXlXgyo2kWfSCwPw/nsABgygzD0Afi2yB7TvZbOwwATBIhCJBiJfluXRO/mflzTmIBAIAMlihFXzaLPhDYH+ewAODKJHtBKEcHkCUIQYA0WB8/ibXjICexAABA5mITBLlkfBPEtHgSAMguiV6QPafOyjT5OxRA5iMEAdLgw0urJUlvHuvTcCBs8TQAAAAXGgmGdazHL0laRgiCHOCJb4KE2AQBgClZObtELoehrqGAWvtHrR4HAC6LEA
RIg/mVBWqsyFcwEtWr73dbPQ4AAMAFDnYMyTSlykKvqot8Vo8DpJw7vgkSoBMEAKbE53YmXzDBSSwA2YAQBEgDwzD04SXxk1gHOIkFAAAyz3gfCFsgyA1upyEpVozOORcAmJqr470g75zqt3YQAJgEQhAgTT4SP4n18qEuRaP8kAUAADJLS7wPhBAEucLrdEqSTFOK8P05AExJoheETRAA2YAQBEiTaxvLVeR1qWc4qPdaB6weBwAA4DwtyU2QEosnAdLD7TKS/znISSwAmJI18U2QlvZBjQTpPgWQ2QhBgDTxuBz64KIqSdKOA50WTwMAADAuHInqYMeQJKmJTRDkCLdz/MfhUJhNEACYilmleaot9ikSNfXeGV7oCSCzEYIAafThJbGTWC+1EIIAAIDMcbTbr0A4qkKvS3PL860eB0gLl8OQEV8GYRMEAKYusQ2y5xQnsQBkNkIQII0+vKRaDkM62DGk030jVo8DAAAgSWqO94EsrSuSw2Fc5qMBezAMI7kNQggCAFOXKEffQy8IgAxHCAKkUVmBR9c0lkviJBYAAMgc9IEgV3niIUgoTAgCAFN19ZxSSdKeU/0yTc4KAshchCBAmt22tEaS9J8HuiyeBAAAIKY5HoI01dEHgtziccVDEDZBAGDKls0qkcflUJ8/qOM9fqvHAYCLIgQB0mx9UywEeeNYrwZGQxZPAwAAcp1pmslzWJSiI9e4nbHzbwE2QQBgyjwuh1bPLpUkvX2Ck1gAMhchCJBm8yoLtLC6UOGoqV8c7rZ6HAAAkOPOnB3V4FhYbqehRTVFVo8DpFWiE4RNEAC4MtfOi/WC/OpEn8WTAMDFEYIAFlifOInVQi8IAACwVuIU1lXVRcnTQECuGD+HxS17ALgSid7TtylHB5DB+CkHsMBtTdWSpJcPdfGqMwAAYKmW9kQpOqewkHsSxehBzmEBwBW5ek6ZDEM63uNX19CY1eMAwIQIQQALrG4oU0WBR0NjYb11nJVRAABgnRb6QJDDOIcFANNTkufW4vg5zd30ggDIUIQggAWcDkMfXhLbBnmJk1gAAMBCiXNYy2aVWDwJkH6Jc1hBQhAAuGLXzYudxPoVIQiADEUIAljktqZ4L8iBTpkmN4gBAED69fmDah+Ina5YWkcpOnKP22lI4hwWAExHoheEcnQAmYoQBLDITVdVyuty6MzZUR3qHLJ6HAAAkIOa46ew5lbkq8jntngaIP3cdIIAwLRd21gmKfZ9xXAgbPE0AHAhQhDAIvkel25aWClJeqmZk1gAACD9xk9h0QeC3JQoRg9HCUEA4ErVleRpdlmeoqa091S/1eMAwAUIQQALrY+fxHrpACEIAABIP/pAkOucjtg5rHCU87QAMB3Xxk9ivcVJLAAZiBAEsNBHllbLMKT3zgyofWDU6nEAAECOSZzDYhMEuSpxDiscIQQBgOm4Jn4S621CEAAZiBAEsFB1kU9Xz4l9o/CfLWyDAACA9PEHwjre45fEJghyF5sgADAzrotvgrxzql+hCCcGAWQWQhDAYrfHT2L9jBAEAACk0cGOQZmmVF3kVVWR1+pxAEu4nPEQhCfsMEWPP/64Ghsb5fP5tHbtWr311luX/Pj+/n7dd999qqurk9fr1aJFi/TCCy+kaVog9RZUFao0363RUCR5bhMAMgUhCGCx25fVSpJ2He3VwGjI4mkAAECuoBQdkNyORDE6myCYvKefflqbN2/Www8/rD179mjVqlXasGGDurq6Jvz4YDCo2267TSdOnNAzzzyjQ4cOadu2baqvr0/z5EDqOByGrpnLSSwAmYkQBLDYvMoCXVVdqHDU1M5DE3/TDAAAMNOaWylFB5zJTRBCEEzeY489pnvuuUcbN25UU1OTtm7dqvz8fG3fvn3Cj9++fbv6+vr07LPP6sYbb1RjY6NuueUWrVq1Ks2TA6mVKEf/FSEIgAxDCAJkgNuXxU9iNXMSCwAApEdzO6XogDvZCcI5LExOMBjU7t27tX79+uT7HA6H1q9fr127dk34mOeee07r1q3Tfffdp5qaGi1fvlyPPPKIIpHIRf+cQCCgwcHB896ATHdNPAR5+8RZmSbhMoDMQQgCZIDbm2InsXYe6tJY6OLfCAMAAMyEUCSqwx3DktgEQW5zcg4LU9TT06NIJKKamprz3l9TU6OOjo4JH3Ps2DE988wzikQieuGFF/Tggw/q29/+tv7sz/7son/Oo48+qpKSkuRbQ0PDjH4eQCosry+W1+VQrz+oYz1+q8cBgCRCECADrKgvUW2xT/5gRLuO9lo9DgAAsLn3O4cVjERV5HOpoTzP6nEAy7gpRkcaRKNRVVdX63vf+57WrFmju+66S1/72te0devWiz5my5YtGhgYSL6dPn06jRMDV8brcmp1Q6kk6a3jnMQCkDkIQYAM4HAYuq0pfhKrZeJXDwEAAMyU5rbYKaymumIZhmHxNIB1XIkQhE0QTFJlZaWcTqc6O88/ZdzZ2ana2toJH1NXV6dFixbJ6XQm37d06VJ1dHQoGAxO+Biv16vi4uLz3oBssHZe7CTWm8d4gSeAzHFFIcjjjz+uxsZG+Xw+rV27Vm+99dYlP76/v1/33Xef6urq5PV6tWjRIr3wwgtXNDBgV4lekJdaOhXhhzAAAJBCzW2UogPSOeewKEbHJHk8Hq1Zs0Y7duxIvi8ajWrHjh1at27dhI+58cYbdeTIEUXP6Z45fPiw6urq5PF4Uj4zkE7Xz6+QJL15vI9eEAAZY8ohyNNPP63Nmzfr4Ycf1p49e7Rq1Spt2LBBXV1dE358MBjUbbfdphMnTuiZZ57RoUOHtG3bNtXX1097eMBO1s6rUJHPpZ7hoPaePmv1OAAAwMZakiEIryxGbqMYHVdi8+bN2rZtm5588kkdOHBA9957r/x+vzZu3ChJuvvuu7Vly5bkx997773q6+vT/fffr8OHD+v555/XI488ovvuu8+qTwFImQ/MKZPbaah9YEyn+kasHgcAJEmuqT7gscce0z333JP84r5161Y9//zz2r59u77yla9c8PHbt29XX1+fXn/9dbndbklSY2Pj9KYGbMjjcujDS6r1b3vb9LPmTq2ZW271SAAAwIaiUVMt7fEQpJ4QBLnNmewE4dXKmLy77rpL3d3deuihh9TR0aHVq1frxRdfTJalnzp1Sg7H+GtOGxoa9NOf/lRf/OIXtXLlStXX1+v+++/Xl7/8Zas+BSBl8jxOrZpdqrdPntWbx/o0t6LA6pEAYGqbIMFgULt379b69evHfwOHQ+vXr9euXbsmfMxzzz2ndevW6b777lNNTY2WL1+uRx55RJFI5KJ/TiAQ0ODg4HlvQC64vSl2Q/ZnLZ2sjQIAgJQ41Tei4UBYXpdDC6sKrR4HsJQ7cQ6Lc7SYok2bNunkyZMKBAJ68803tXbt2uSv7dy5U0888cR5H79u3Tq98cYbGhsb09GjR/XVr371vI4QwE4SJ7HeOE4vCIDMMKUQpKenR5FIJPnqhoSamhp1dExc5nzs2DE988wzikQieuGFF/Tggw/q29/+tv7sz/7son/Oo
48+qpKSkuRbQ0PDVMYEstYti6vkcTl0vMev97uGrR4HAADYUKIPZEltkVzOK6oIBGzD6aAYHQBm2tr5iXL0PosnAYCYlP/UE41GVV1dre9973tas2aN7rrrLn3ta1/T1q1bL/qYLVu2aGBgIPl2+vTpVI8JZIRCr0s3L6yUJL24f+JgEQAAYDqa2wYkSU2UogNyJ89h0QkCADNlzdwyuRyGWvtHdZpeEAAZYEohSGVlpZxOpzo7O897f2dnp2prayd8TF1dnRYtWnTemufSpUvV0dGhYDA44WO8Xq+Ki4vPewNyxYblsf8tEYIAAIBUaKYUHUhKbEOxCQIAMyff49KK2bEXW7x5nG0QANabUgji8Xi0Zs0a7dixI/m+aDSqHTt2aN26dRM+5sYbb9SRI0cUjY6/subw4cOqq6uTx+O5wrEB+1q/tEZOh6GW9kFeMQEAAGYcIQgwLnkOi00QAJhRyV6QY/SCALDelM9hbd68Wdu2bdOTTz6pAwcO6N5775Xf79fGjRslSXfffbe2bNmS/Ph7771XfX19uv/++3X48GE9//zzeuSRR3TffffN3GcB2Eh5gUfXNcbuZ/60mW0QAAAwc7oGx9QzHJDDkJbUEoIAyXNYbIIAwIxaOy/eC0I5OoAM4JrqA+666y51d3froYceUkdHh1avXq0XX3wxWZZ+6tQpORzj2UpDQ4N++tOf6otf/KJWrlyp+vp63X///fryl788c58FYDMfXV6rXcd69eL+Dn3h5vlWjwMAAGwisQWyoKpQeR7nZT4asD9n/GfXcIQQBABm0jWN5XI6DJ3uG1Vr/6jqS/OsHglADptyCCJJmzZt0qZNmyb8tZ07d17wvnXr1umNN964kj8KyEm3L6vRw881a/eps+oaGlN1kc/qkQAAgA0kStE5hQXEuBPnsKKcwwKAmVTodWl5fYnePd2vN4/16reunm31SABy2JTPYQFIvbqSPK1uKJVpSi+1dFo9DgAAsInxPpASiycBMgPF6ACQOtcnTmIdoxwdgLUIQYAMtWFZrSTpxf30ggAAgJmxn00Q4DyuZDE6IQgAzLS18+kFAZAZCEGADLVhWaxnZ9fRXg2MhCyeBgAAZLuB0ZBO941KYhMESHDFi9FDEc5hAcBMu6axXA5DOtE7oo6BMavHAZDDCEGADDW/qlCLa4oUjpr6+SFOYgEAgOlpiZ/Cml2Wp5J8t8XTAJnBGd8EiXAOCwBmXLHPnXzhBdsgAKxECAJksMQ2CCexAADAdFGKDlzITScIAKTU9fGTWLuOEoIAsA4hCJDBNiyP9YL84nC3RoJhi6cBAADZjFJ04ELJTpAo57AAIBVuWFApSXqdEASAhQhBgAzWVFeshvI8jYWi+sWhbqvHAQAAWYxNEOBCLkd8E4RidABIiWvnlcvpMHSqb0Sn+0asHgdAjiIEATKYYRj62PI6SdILnMQCAABXaCwU0dFuvyRpeT2bIEDCeDE6IQgApEKh16VVs2Pfe+w6xjYIAGsQggAZ7o4VsRDk5wc6NRaKWDwNAADIRgc7hhSJmqos9Ki6yGv1OEDGcCWL0TmHBQCpcuPC+EmsIz0WTwIgVxGCABlu1ewSzSrxyR+M6JXDnMQCAABTlziF1TSrRIZhWDwNkDlcTs5hAUCqrVtQISnWC2Ka/H0LIP0IQYAMZxiGPho/ifUiJ7EAAMAV2N+aKEWnDwQ413gxOk/KAUCqXD2nTB6XQ11DgeR5TgBIJ0IQIAt8bEWtJOmlA50KhDmJBQAApqaFUnRgQolOkDDnsAAgZXxup66ZWyZJ2nWUk1gA0o8QBMgCV88pU3WRV0NjYb1+hCIxAAAweeFIVAc7hiRJy2dRig6cy+WIn8NiEwQAUirZC3KU5zQApB8hCJAFHA5DdyyPbYO8sK/d4mkAAEA2OdrtVyAcVaHXpTnl+VaPA2SUxDks05QiBCEAkDKJXpBdx3oV5e9bAGlGCAJkiTtWxHpBftbSqVCEdX0AADA5+1vjpeh1xXI4KEUHzpU4hyWJ77EBIIVW1peo0OtS/0hILe2DVo8DIMcQggBZ4trGclUWejQwGtIu1kcBAMAkNbfFnmhoog8EuEDiHJbEJggApJLL6dB188oliec0AKQdIQiQJZwOQ7cvi53E+o/9nMQCAACT00wpOnBR526ChCOEIACQSjfET2K9Tjk6gDQjBAGyyMeWx09iNXcqzLo+AAC4jGjUVEt8E2QZpejABVznnIgLR/n+GgBS6YYFsXL0t473cYIQQFoRggBZZO38cpXlu9XrD+qt431WjwMAADLc6bMjGgqE5XE6dFVNodXjABnHMAw540FImHNYAJBSS2qLVF7gkT8Y0Xtn+q0eB0AOIQQBsojb6dCG+Emsn+zjJBYAALi0RB/I4toiuZ186w9MJLENwquSASC1HA5D6+bHT2IdoRcEQPrwkxCQZT6+MnYS68X9HZzEAgAAl0QfCHB5iRCEYnQASL118V6QV4/QCwIgfQhBgCyzbn6Fygs86vMH9cYxTmIBAICLa072gRCCABfjim9JhShGB4CUu2lhrBdkz6mz8gfCFk8DIFcQggBZxuV06KPL4yex3muzeBoAAJDJEiFIE6XowEWxCQIA6TO3Il+zy/IUiph0nQJIG0IQIAv9xor4SazmDm4XAwCACXUNjql7KCDDkJbWFVk9DpCxXE46QQAgXQzD0M1XxbZBfvk+J7EApAchCJCF1s6vUGWhR/0jIb1+lDIxAABwocQWyPzKAuV7XBZPA2QulyP2YzGbIACQHjctrJIkvXqk2+JJAOQKQhAgCzkdhu5YHtsG+cm7nMQCAAAXSpSiL6/nFBZwKYlNkHCUTRAASIcbFlTIMKTDncPqHByzehwAOYAQBMhSv7EyFoL8tLlDwTA/sAEAgPNRig5MTqIThGJ0AEiPsgKPVsRfpPEqJ7EApAEhCJClrmksV3WRV4NjYVZIAQDABcZDEDZBgEvhHBYApN9NC2O9IK8eIQQBkHqEIECWcjoMfSxekP6T99otngYAAGSSwbGQTvWNSGITBLgcitEBIP1uumo8BDFNQmgAqUUIAmSxxEmsl5o7FQhHLJ4GAABkipb4Fkh9aZ5K8z0WTwNkNpeTTRAASLc1c8uU53aqeyigQ51DVo8DwOYIQYAsdvWcMtUW+zQUCOuVw6yQAgCAmMQprCa2QIDLohMEANLP63LqunnlkugFAZB6hCBAFnM4DH18ZeIkVpvF0wAAgEzR3DYgiVNYwGQkQpBwlHNYAJBON8dPYv2SEARAihGCAFnuzlWzJEkvtXRqNMhJLAAAMH4Oi1J04PISnSCcwwKA9Er0grx5vJcT3wBSihAEyHKrZpeooTxPI8GIfn6wy+pxAACAxcZCEb3fNSyJTRBgMlyO2I/FnMMCgPRaXFOkqiKvxkJR7T551upxANgYIQiQ5QzD0J0rY9sgz73bavE0AADAaoc7hxSJmirLd6uuxGf1OEDGcyc3QTiHBQDpZBiGbloY2wahFwRAKhGCADaQOIn18qFuDY6FLJ4GAABYaX/r+CkswzAsngbI
fE6K0QHAMskQ5AghCIDUIQQBbGBJbZEWVhcqGI7qpeZOq8cBAAAWohQdmBqXM/ZjcTjCJggApFuiHH1f64D6/EGLpwFgV4QggA2cexLr399rs3gaAABgpeZEKXo9pejAZLjimyBhitEBIO2qi31aUlsk05R++X631eMAsClCEMAmfmNVnaTYHU1ePQEAQG6KRE0d7Eicw2ITBJiMRDE6IQgAWOOWxVWSpFcOcxILQGoQggA2saCqUMtmFSscNfUf+9utHgcAAFjgWPewxkJR5XucmldRYPU4QFZIbIJECEEAwBK3XBUPQd7vlmnydzGAmUcIAtjIJ+IF6f/+LiexAADIRYlTWEvriuVwUIoOTIbLmShGpxMEAKywprFM+R6nuocCOtA+ZPU4AGyIEASwkY+vjJ3EevN4nzoHxyyeBgAApBul6MDUuePF6GyCAIA1vC6n1s2vkCT94jC9IABmHiEIYCOzy/K1Zm6ZTFN6/j1OYgEAkGuSpeiEIMCkOR2JTRBCEACwyngvCCEIgJlHCALYzJ3xbZDnOIkFAEBOMU3znBCkxOJpgOyROIcV5hwWAFjmg/FekLdP9skfCFs8DQC7IQQBbOZjK+vkMKS9p/t1um/E6nEAAECatPaPamA0JJfD0FU1hVaPA2SNRDF6mHNYAGCZxsoCza3IVyhiatfRXqvHAWAzhCCAzVQX+XR9/JbmTziJBQBAzkhsgVxVUySvy2nxNED2cDliPxaHo2yCAICVEtsg9IIAmGmEIIAN/cbKWZKkf+ckFgAAOaMlHoI01dEHAkyFO34Oi2J0ALDWLYvivSDvE4IAmFmEIIANfXR5rVwOQy3tgzraPWz1OAAAIA0oRQeujDO+CUIxOgBYa92CCrmdhk72juhEj9/qcQDYCCEIYEPlBR7ddFWlJOkn73ISCwCAXNDSNiCJEASYKjfF6ACQEQq8Ll0zt1wS2yAAZhYhCGBTd8ZPYj33bqtMk1e1AbCXxx9/XI2NjfL5fFq7dq3eeuutST3uqaeekmEY+tSnPpXaAYE0O+sPqm1gTJLURAgCTImTYnQAyBi3LI73ghwiBAEwcwhBAJu6bVmNPC6Hjnb7dbBjyOpxAGDGPP3009q8ebMefvhh7dmzR6tWrdKGDRvU1dV1ycedOHFCX/rSl3TzzTenaVIgfVraY6ew5lbkq8jntngaILu4nPFidM5hAYDlEuXou471KhCOWDwNALsgBAFsqtjn1q3xV1BQkA7ATh577DHdc8892rhxo5qamrR161bl5+dr+/btF31MJBLR7/3e7+nrX/+65s+fn8ZpgfRojp/CohQdmDoXmyAAkDGW1hWpusirkWBEvzp+1upxANgEIQhgY78RP4n1k/faOYkFwBaCwaB2796t9evXJ9/ncDi0fv167dq166KP+8Y3vqHq6mp9/vOfn9SfEwgENDg4eN4bkMkoRQeu3HgIQicIAFjNMAzdurhakvTyoUtvegPAZBGCADb2kaXVynM7dapvRO+dGbB6HACYtp6eHkUiEdXU1Jz3/pqaGnV0dEz4mFdffVXf//73tW3btkn/OY8++qhKSkqSbw0NDdOaG0i18RCkxOJJgOzjjp/DirAJAgAZ4dYlsasWLx8kBAEwMwhBABvL97i0vin2RCEnsQDkoqGhIX32s5/Vtm3bVFlZOenHbdmyRQMDA8m306dPp3BKYHpGgxEd6x6WxCYIcCUSxeihCJsgAJAJblxYKZfD0LEev070+K0eB4ANEIIANnfnyjpJ0r+/18ar2wBkvcrKSjmdTnV2dp73/s7OTtXW1l7w8UePHtWJEyd05513yuVyyeVy6Qc/+IGee+45uVwuHT16dMI/x+v1qri4+Lw3IFMd7BhU1JQqCz2qKvJaPQ6QddzO+DksitEBICMU+dy6trFckrSTk1gAZgAhCGBztyyuUkmeW52DAb1+tMfqcQBgWjwej9asWaMdO3Yk3xeNRrVjxw6tW7fugo9fsmSJ9u3bp7179ybfPvGJT+jWW2/V3r17OXMFW0icwmqaVSLDMCyeBsg+Tkfsx2KK0QEgc3x4SaIXpNviSQDYASEIYHNel1OfWBUrSP/X3WcsngYApm/z5s3atm2bnnzySR04cED33nuv/H6/Nm7cKEm6++67tWXLFkmSz+fT8uXLz3srLS1VUVGRli9fLo/HY+WnAswIStGB6XE5KUYHgEyT6AXZdaxXI8GwxdMAyHYuqwcAkHq/vWa2/vGNk3qxuUPDgbAKvfxPH0D2uuuuu9Td3a2HHnpIHR0dWr16tV588cVkWfqpU6fkcPA6D+SOlrYBSYQgwJVyJzZBOIcFABljQVWhZpfl6czZUe062quPLK2xeiQAWYxnQoEcsGp2ieZXFehYt18v7GvX717D+RcA2W3Tpk3atGnThL+2c+fOSz72iSeemPmBAIuEI1Ed7BiSJC2bVWLxNEB2ShSjcw4LADKHYRi6dXG1/vGNk3r5UBchCIBp4WWSQA4wDEO/ffVsSZzEAgDATo71+BUIR1XgcWpueb7V4wBZabwYnXNYAJBJEiexXj7YLdMkqAZw5QhBgBzxmx+ol2FIbx7v0+m+EavHAQAAM6A5fgpraV2xHA5K0YErwSYIAGSmdfMr5XU51No/qve7hq0eB0AWIwQBcsSs0jzdsKBCkvTjd1otngYAAMyE5lZK0YHpcjvpBAGATJTncWpd/HmMlw92WTwNgGxGCALkkMRJrB/tOcMqKQAANtDSnghB6AMBrhSbIACQuW5dXC1JevkQIQiAK0cIAuSQDctqle9x6kTviPacOmv1OAAAYBpM01RzWywEaWITBLhiyU6QKJ0gAJBpEiHI2yfOanAsZPE0ALIVIQiQQwq8Lt2xvE6S9MxuTmIBAJDNWvtHNTAaksth6KqaQqvHAbKWy8E5LADIVHMq8rWgqkDhqKlfHu6xehwAWYoQBMgxv311vSTpJ++1aSwUsXgaAABwpVriWyBX1RTJ63JaPA2QvcbPYbEJAgCZ6CNLayRJOw50WjwJgGxFCALkmOvnV6iuxKehsbB2Huq2ehwAAHCFkqew6jiFBUwHxegAkNk+siR2Euvnh7oUjhBYA5g6QhAgxzgchu5cNUuS9Ny7nMQCACBbjZeiE4IA03FuMbppEoQAQKZZM7dMpflu9Y+EtOdUv9XjAMhChCBADvpEPATZcaBLQxSLAQCQlRLnsAhBgOlJFKNLUiRKCAIAmcbldCQL0jmJBeBKEIIAOWjZrGLNrypQIBzVz5r5BgIAgGzTPxJUa/+oJGkpIQgwLS7n+I/FYUIQAMhIH1kaC0FeIgQBcAUIQYAcZBiGPrkqVpD+3LttFk8DAACmKrEFMqc8X8U+t8XTANnN5RjfBCEEAYDM9MFFVXI7DR3r9ut4j9/qcQBkGUIQIEd9YnXsJNarR3rUOxyweBoAADAViT4QStGB6TsvBKFwFwAyUrHPrbXzKiRxEgvA1BGCADlqXmWBVs4uUSRq6oV97VaPAwAApqCZPhBgxjjZBAGArJA4ifWfhCAApogQBMhhiYL0f9vLSSw
AALJJc9uAJKmJEASYNsMwktsg4QghCABkqvVLayRJvzpxVgMjIYunAZBNCEGAHHbnqlkyDOntk2d15uyI1eMAAIBJGAtFdLQ7dgt72awSi6cB7MHljIcgUc5hAUCmaijP1+KaIkWipnYe7rJ6HABZhBAEyGE1xT5dH7+p+e/vchILAIBscKhjSJGoqfICj2qKvVaPA9iCyxH70ZhNEADIbOMnsQhBAEweIQiQ4xIF6f+2t9XiSQAAwGQkStGXzSqWYRiX+WgAk8EmCABkh4/ET2LtPNSlUIS/swFMDiEIkOPuWF4rt9PQwY4hHYg/qQIAADJXsg+kjj4QYKYkO0EoRgeAjLa6oVSVhR4NjYX1q+N9Vo8DIEsQggA5rjTfo9uaYq+keOK1E9YOAwAALqulLfaiBUrRgZnDOSwAyA5Oh6EPL4mdxPpZS6fF0wDIFoQgAPT5m+ZJkn78Tqu6hwIWTwMAAC4mEjV1oH1IUuwcFoCZ4WQTBACyxu1NtZKkl1o6ZZr8vQ3g8ghBAGjN3HJ9YE6pgpGo/vGNk1aPAwAALuJEr1+joYjy3E7Nqyy0ehzANtyJThDuywNAxrvpqkrle5xq7R9VcxtnvQFcHiEIAEnSF26aL0n6pzdOaiwUsXgaAAAwkcQP+kvqipKvXAcwfS5n7EfjEOewACDj+dxO3bKoSpL00+YOi6cBkA0IQQBIkjYsq9Hssjz1+YP60Z5Wq8cBAAATSPaBUIoOzKhEMXqEc1gAkBVuXxbrNiUEATAZhCAAJMVe/bbxxlg3yN+/ekxRfgAEACDjNLcNSJKWzSqxeBLAXlzxc1ihKOewACAbfHhxjVwOQ4c7h3W8x2/1OAAyHCEIgKTfvWa2irwuHev2a+fhLqvHAQAA5zBNc3wThFJ0YEY5HbEfjSOcwwKArFCS79a6BRWSpJ+xDQLgMghBACQV+dz69No5kqS//+Vxi6cBAADn6hoKqNcflMOQltQWWT0OYCvu+DmsMJsgAJA1bm/iJBaAySEEAXCez93QKKfD0OtHe7XvzIDV4wAAgLjEFsiCqkL53E6LpwHsJXEOK8xJWADIGrc11UqS3jndr67BMYunAZDJCEEAnKe+NE93rqyTJP3pv76nsVDE4okAAIB0bh8Ip7CAmeaKn8MKcw4LALJGbYlPqxpKZZrSSwc6rR4HQAYjBAFwga9+fKkqCjw60D6ov3jxkNXjAAAASS3t9IEAqZIsRo9wDgsAssmGZYmTWIQgAC6OEATABaqLfPrL/7JSkrT9tePaeYiSdAAArNYcP4e1bFaJxZMA9uOKd4JEOIcFAFllw7LYSaxdR3s0OBayeBoAmYoQBMCEPrykRr9/Q6Mk6Uv/5111DwWsHQgAgBw2NBbSyd4RSdLSOjZBgJmWOIcVIgQBgKyyoKpQC6oKFIqYevkgL+AEMDFCEAAX9ZU7lmhxTZF6hoP602felWnyQyEAAFY40D4kSaor8am8wGPxNID9OOPnsCKcwwKArJPYBvlpc4fFkwDIVIQgAC7K53bq//30B+RxOfTyoW49+h8HOREAAIAFWihFB1LKHT+HFeZ7XQDIOncsr5MkvXywW6PBiMXTAMhEhCAALmlxbZEe+o0mSdL3Xjmmz37/TU5jAQCQZslSdE5hASnhcsbPYUUIQQAg2yyvL9bssjyNhiL6xWFOYgG4kMvqAQBkvv92/VwV+Vza8qN9ev1orz7+//5Sf/uZq3XdvPIJP77PH9TB9kGdOTuq/tGgzo6E1D8S0kgwLLfTIbfTIa/LoTyPU7curta1jWUyDOOKZotETR3uHNK7p/u193S/2gbG9KFFVfqtq+tVmj/9cyGBcEQH2oc0GowoEI4oEI7K43LopoWVcjvJkQEA6ZEoRW+iFB1IifFidM5hAUC2MQxDH1tRp++9ckwv7OvQR+ObIQCQQAgCYFI+ubpey2YV695/2qP3u4b16W1vaO28cuV7nPK6nfK5nOoeDuhg+6C6prAp8t2dRzWvskD/5ZrZ+u2rZ6um2HfZx4wEw/rPA116bm+bXj/ao5FfW3d95XC3vvXiQX18RZ0+fd2cKwpZTvWO6IdvndT/efuM+vzBC379poWVemLjtclXDQIAkCrBcFTvdw5L4hwWkCqueCcImyAAkJ3uWF6r771yTDsOdGosFJHP7bR6JAAZhBAEwKQtrC7Sv226UV/90T49u7dNrx/tvejHzq3IV2NFgcoLPCrNd6s0z6MCr1PhqKlgOKpgOKq2gVG9uL9Dx3v8+osXD+nbPzuspXVFWj6rRMtmFatpVokKvE71j4TUPxLbKHnjWK9eauk8L/go9Lq0cnaJVjWUqizfrR/tadXBjiH9+J1W/fidVq1uKNWXP7pE6xZUXPLzM01TvzjcrSdeP6FfHO5Woge+LN+tikKvvK7YBktL+6BePdKjb/6kRV//5PIZ+XcLAMDFHOkaVjASVZHPpdlleVaPA9iSyxF7YQv9dwCQnVY3lGpWiU9tA2P65fs9uq2pxuqRAGQQQhAAU5Lvcemv71qt/3b9XLX2j2o0GNFYKKLRUFQleW4tqSvS4poiFXgn99fLNz8Z1vPvtetf3j6tt0+e1f7WQe1vHbzs4xrK8/SJVbP0sRV1WlJbLKdjfNPjnpvn690zA3rqrVN6dm+r9p7u16e3vaEPLqrSn25YrOX1558SiURN/cf+dn1359HkuRFJuvmqSv236+fqI0uqz9v4eHF/h/7wn3bryV0ntbCmSJ+9fu6kPlcAAK7EuX0gV3o+EsClJc5hhTiHBQBZyTAMfXR5nba/dlz/sa+dEATAeQhBAEyZYRi6prFc18zA71Xgdel3r23Q717boDNnR7TvzID2tw2ouW1QzW2DikRNlea7VZbvUWmeW/MqC/TxlXVa3VB60SeCDMPQ6oZSrW4o1ebbF+lvdhzR/37rlF453K1XDndrUU2hqoq8qiz0qizfo18c7tbxHr8kKd/j1Kevm6PPXj9XjZUFE/7+H11eq/+xYbH+8qeH9H8/16x5FQW66arKGfi3AQDAhZrbBiRJTZzCAlIm8YKXCOewMEmPP/64/vIv/1IdHR1atWqV/uZv/kbXXXfdZR/31FNP6dOf/rQ++clP6tlnn039oEAO+diKWm1/7bheOtCpQDgir4uTWABiCEEAZIzZZfmaXZavO1bMXIlZdZFP3/zUcn3h5nl67KXD+re9bTrcOazD8dvqCSV5bm28sVGfW9eosoLLF6r/0YcW6EjXsH78Tqv+6Ie79ex9N2p+VeGMzQ0AQEJLfEtxGaXoQMokNkHCnMPCJDz99NPavHmztm7dqrVr1+o73/mONmzYoEOHDqm6uvqijztx4oS+9KUv6eabb07jtEDuuHpOmaqLvOoaCuj1I726dcnF//cIILcQggDICXMrCvT//NcP6Eu3L9aJXr96hgPqGQqqZzigWaV5+u01s1U4yRNeUmzb5NHfWqGTvX7tOdWvh59r1j9+fm0KPwMAQC4yTfO8c1gAUmO8GJ1zWLi8xx57TPfcc482btwoSdq6dauef/55bd++XV/5ylcmfE
wkEtHv/d7v6etf/7p++ctfqr+/P40TA7nB4TB0x/JaPbnrpF7Y104IAiCJEARATmkoz1dDef6M/F4+t1Pf+u2Vuv2vX9Fbx/sUikTlPqc7BACA6TpzdlRDY2F5nA4trGbjEEiVxCYIxei4nGAwqN27d2vLli3J9zkcDq1fv167du266OO+8Y1vqLq6Wp///Of1y1/+8rJ/TiAQUCAQSP7z4ODlexMBSHesqNOTu07qZy2deoSf0QHE8TcBAEzDwqpClea7FQhHk+dKAACYKc3xry1X1RTK4+JbdyBVEp0gITpBcBk9PT2KRCKqqTm/dLmmpkYdHR0TPubVV1/V97//fW3btm3Sf86jjz6qkpKS5FtDQ8O05gZyxbWN5aos9GhgNKRdR3utHgdAhuAnKQCYBofD0NVzyiRJu0+etXgaAIDdtCRK0TmFBaTU+CYI57Aws4aGhvTZz35W27ZtU2Vl5aQft2XLFg0MDCTfTp8+ncIpAftwOgxtWFYrSXphX7vF0wDIFIQgADBNV88plSTtOUUIAgCYWYk+kGWzCEGAVEqEICHOYeEyKisr5XQ61dnZed77Ozs7VVtbe8HHHz16VCdOnNCdd94pl8sll8ulH/zgB3ruuefkcrl09OjRCf8cr9er4uLi894ATM7HVtRJkn7W0qkwXU8ARAgCANN29dzYJsgeNkEAADMscQ6raVaJxZMA9pY4h8WTZbgcj8ejNWvWaMeOHcn3RaNR7dixQ+vWrbvg45csWaJ9+/Zp7969ybdPfOITuvXWW7V3717OXAEpsHZeucoLPOrzB/XGsT6rxwGQAShGB4BpWjW7VE6HobaBMbX1j2pWaZ7VIwEAbKDPH1T7wJgkaWldkcXTAPZGMTqmYvPmzfrc5z6na665Rtddd52+853vyO/3a+PGjZKku+++W/X19Xr00Ufl8/m0fPny8x5fWloqSRe8H8DMcDkd2rCsVv/7rVN6fl+bbrpq8qfoANgTmyAAME0FXlfyySlOYgEAZsqB+CmsuRX5KvK5LZ4GsDeK0TEVd911l/7qr/5KDz30kFavXq29e/fqxRdfTJalnzp1Su3tdBEAVvp4/CTWT5s5iQXgCkOQxx9/XI2NjfL5fFq7dq3eeuutST3uqaeekmEY+tSnPnUlfywAZKw1cxInsfqtHQQAYBvNlKIDacMmCKZq06ZNOnnypAKBgN58802tXbs2+Ws7d+7UE088cdHHPvHEE3r22WdTPySQw66fz0ksAOOmHII8/fTT2rx5sx5++GHt2bNHq1at0oYNG9TV1XXJx504cUJf+tKXdPPNN1/xsACQqRK9ILvZBAEAzJCWNkrRgXRxOePF6LxaGABsIXESS5Ke39dm8TQArDblEOSxxx7TPffco40bN6qpqUlbt25Vfn6+tm/fftHHRCIR/d7v/Z6+/vWva/78+dMaGAAy0dXxTZDm1gGNhSIWTwMAsIPmZAhCKTqQai5HvBidTRAAsA1OYgFImFIIEgwGtXv3bq1fv378N3A4tH79eu3ateuij/vGN76h6upqff7zn5/UnxMIBDQ4OHjeGwBkstlleaou8iocNfXemQGrxwEAZLmxUERHu4clSU1sggAplziHRQgCAPbBSSwACVMKQXp6ehSJRJJlXwk1NTXq6OiY8DGvvvqqvv/972vbtm2T/nMeffRRlZSUJN8aGhqmMiYApJ1hGFoTP4lFOToAYLoOdgwpakqVhR5VF3mtHgewvcQ5LF4pDAD2wUksAAlXVIw+WUNDQ/rsZz+rbdu2qbKyctKP27JliwYGBpJvp0+fTuGUADAzEiexdp8kBAEATE+iD2RpXbEMw7B4GsD+EuewKEYHAHvhJBYASXJN5YMrKyvldDrV2dl53vs7OztVW1t7wccfPXpUJ06c0J133pl8XzQa+wvH5XLp0KFDWrBgwQWP83q98np5xRuA7JIoR99z8qxM0+RJKwDAFWtui51WpA8ESA+K0QHAnn79JNZNV03+RdoA7GNKmyAej0dr1qzRjh07ku+LRqPasWOH1q1bd8HHL1myRPv27dPevXuTb5/4xCd06623au/evZy5AmAry+uL5XE61OsP6mTviNXjAACyWEt7bBOEPhAgPdzxEIRNEACwF05iAZCu4BzW5s2btW3bNj355JM6cOCA7r33Xvn9fm3cuFGSdPfdd2vLli2SJJ/Pp+XLl5/3VlpaqqKiIi1fvlwej2dmPxsAsJDX5dSK2bFX7NILAgC4UpGoqYPtQ5KkZYQgQFo44+ewQhFCEACwm8RJrBf3d7DxB+SoKZ3DkqS77rpL3d3deuihh9TR0aHVq1frxRdfTJalnzp1Sg5HSqtGACBjXT2nVLtPntXuk2f1W1fPtnocAEAWOt7j12goojy3U40VBVaPA+QElyNejB7lyTEAsJvr55erstCjnuGgXj3So1sXV1s9EoA0m3IIIkmbNm3Spk2bJvy1nTt3XvKxTzzxxJX8kQCQFdbMLdO2Xx6nHB0AcMUSfSBL64rkdNAvBaSDi3NYAGBbLqdDH19Rpyd3ndRze9sIQYAcxMoGAMygVQ2lkqTDnUOs2QIArkiiD4RSdCB9XJzDAgBb+8TqeknST5s7NBqMWDwNgHQjBAGAGVRb7JPP7VDUlFrPjlo9DgAgC7W0UYoOpFviHBabIABgT1fPKdXssjyNBCP6zwOdVo8DIM0IQQBgBhmGoTnl+ZKkU30jFk8DAMg2pmmquS2xCUIIAqRL4hwWm7wAYE+GYeiTq2dJkp57t83iaQCkGyEIAMywOeWxEtuThCAAgCnqHAyozx+U02FoUU2R1eMAOcPtjP1oHGYTBABs65Pxk1g7D3VpYCRk8TQA0okQBABmWHITpNdv8SQAgGyTKEVfWFUon9tp8TRA7nCecw7LNAlCAMCOFtUUaUltkUIRU/+xv93qcQCkESEIAMywuRWcwwIAXBn6QABruB3jPxqzDQIA9pXYBvm3vZzEAnIJIQgAzLDEJsjJXkIQAMDU0AcCWMMZ7wSRKEcHADu7c1WdJOmN473qGBizeBoA6UIIAgAzbM45myCcUwAATEVLe3wTpI4QBEgnl2M8BKEcHQDsa3ZZvq5tLJNpSj95j20QIFcQggDADJtdlifDkEaCEfX6g1aPAwDIEoNjoeQpRc5hAemVKEaX2AQBALv7BCexgJxDCAIAM8zrcqqu2CeJk1gAgMk7ED+FVV+ap9J8j8XTALnlnEUQhSKEIABgZx9fUSeXw9C+1gEd7R62ehwAaUAIAgApMH4Sy2/xJACAbNFMKTpgGcMw5I73goSjnMMCADsrL/Dog4uqJEnPvtNq8TQA0oEQBABSYG55gSTpVO+oxZMAALIFfSCAtZzxdZAwmyAAYHuf+kDsJNaP32mlyxPIAYQgAJACiU2Qk2yCAAAmKbEJsoxNEMASbkfsx+MwnSAAYHu3La1RodelM2dHtfvkWavHAZBihCAAkAJzyuPnsOgEAQBMQiAc0fudQ5KkZfUlFk8D5CZn/BxWhHNYAGB7eR6nPrq8VpL0I05iAbZHCAIAKTA32
QlCCAIAuLz3O4cVjpoqzXdrVonP6nGAnOSKb4JQjA4AueE34yexnn+vXYFwxOJpAKQSIQgApEBiE6RrKKDRIN9MAQAurbltQFLsFJZhGBZPA+SmZDE6IQgA5ITr51eottingdGQXj7YbfU4AFKIEAQAUqA036Nin0sS2yAAgMsb7wPhFBZglWQxOuewACAnOB2GPrl6liTpWU5iAbZGCAIAKTKHk1gAgEmiFB2wnttJMToA5JpPxU9i/fxglwZGQhZPAyBVCEEAIEXmlhdIkk72+i2eBACQySJRUwfaCUEAqyU3QTiHBQA5Y2ldsZbUFikYier5fe1WjwMgRQhBACBFEpsgp9kEAQBcwolev0aCEeW5nZpXWWj1OEDOcnEOCwByUqIgnZNYgH0RggBAiiTK0U8SggAALiFxCmtJXVHylegA0i95DotNEADIKZ9YPUuGIb11oo8XMQI2RQgCACkyNx6CnOrlmygAwMU1tw1I4hQWYLXxYnRCEADIJXUlebpxQaUk6Z/fOmXxNABSgRAEAFIkcQ7rzNlRRfhhGgBwES3JUvQSiycBcpvbmegE4RwWAOSau9fNlST98I2T8gfCFk8DYKYRggBAitSV5MntNBSMRNUxOGb1OACADGSaZvIcFpsggLXYBAGA3PWRpTVqrMjX4FhY/7rnjNXjAJhhhCAAkCJOh6HZZfFekF6/xdMAADJRx+CY+vxBOR2GFtUUWT0OkNOSnSAUowNAznE6DP33m+ZJkra/epxrDoDNEIIAQAolytEpVwMATKS5NbYFsrCqUD630+JpgNzmSmyCUIwOADnpd9bMVkmeWyd6R7TjQKfV4wCYQYQgAJBCiRDkJOXoAIAJcAoLyBxOR2IThBAEAHJRvselz6ydI0n6+1ePWzwNgJlECAIAKTQ3Xo5+kk0QAMAEmtsGJElNhCCA5ShGBwB8bl2jXA5Dbx3v03tn+q0eB8AMIQQBgBRq4BwWAOASxjdBSiyeBADF6ACA2hKf7lw1S5L0fbZBANsgBAGAFEpugnAOCwDwa/pHgmrtH5XEJgiQCZLF6HSCAEBO+3y8IP3599rVPjBq8TQAZgIhCACkUKITZGA0pIGRkMXTAAAySUt8C6ShPE8leW6LpwHAJggAQJKW15do7bxyhaOm/vbnR6weB8AMIAQBgBTK97hUVeSVJJ3o9Vs8DQAgkyRPYdVxCgvIBHSCAAAS/nj9IknSD988pV+d6LN4GgDTRQgCACk2r7JAknS8hxAEADAuUYq+jFNYQEZwOWI/HofYBAGAnLduQYXuuqZBkvSVf31PY6GIxRMBmA5CEABIsQVVsRDkWPewxZMAADJJchOknhAEyASJc1iRKJsgAADpqx9bqqoir452+/X4y5zFArIZIQgApFhiE+QYmyAAgLiRYFhH4+H48lmcwwIywfg5LDZBAABSSb5b3/jEMknSd3ce1cGOQYsnAnClCEEAIMXmVxZK4hwWAGDcgfYhRU2pqsir6mKf1eMAkOSMn8OiGB0AkPDR5bW6valG4aipL//rPkX4GgFkJUIQAEixeVXjnSCmyTdMAIDxPpDl9IEAGYNidADArzMMQ9/81HIV+Vx693S/vvbjfTpzdsTqsQBMkcvqAQDA7hrK8uV0GBoJRtQ5GFBtCa/4BYBc19wa7wPhFBaQMVxsggAAJlBT7NMDH1+qL//rPj31q9P6l7dP6yNLa/S5dY26dl6ZTFOKRE1FTVOmJEOSwzBkGJLb6ZDbyWvQM0kgHNG7pwf05rFevXWiT2dHgopGpWj8RauJ164asddGyOU0VOh1qdDrVpHPpdJ8t25YUKmbFlYqz+O06LPAVBGCAECKeVwOzSnP1/Eev451DxOCAAC0P7EJQik6kDFcdIIAAC7irmvnqLLQq3947YRePdKjl1o69VJL52UfZxhSXbFPDeX5mlOer7rSPDkNIxaYxJ9tL/S5VJrvUWmeW6X5HvmDYXUPBdQ9FFDX4JhCUVMFHqcKvC4VeFyx/+t1Jv+zz+1QKGIqEI4oEIoqFInq3K9kpin5g2ENjoY0EH/zByIaC8XeRkMRjQQjGg1G5A+GNRqMKBw1VZbvVlmBR2X5HhX7XApFTQVCEY2FogqEIzIMQy6HIZfTIZfDkNNhyO005HLE/tnhMGTE/x0YMpTncWpBVYEWVBdqYXWhqgq9GglG1BX/PHuGgwpHozJNyZQp05QKvC6Vx2coL/CoJM8tp8OY8N91NGpqJBRRIBRRIBxVIByVPxDW6b4Rnegd0clev451+/XumX4FwtPb+vyH107I53bog1dV6bamGi2bVaL60jwV57lkGBPPB2sRggBAGsyrLIiFID1+3bCw0upxAAAWCoQjOtw5JIlNECCTuOJPqoSinMMCAFzoI0tr9JGlNTrSNaR/3HVSz+w+I38wcsnHmKbUNjCmtoExvXm8L02TzoyB0ZBO9Kbu9JfH6VDwCk5QFnldKs5zqyTPLbfT0MBoSP2jIQ2OhjTZZc7KQo/WzqvQ2vnlaijLl8NhyGHEN3gkmVIyjAlHTA0HwhoOhDU0FtKZs6PacaBLrf2j+llLp352ThiW73FqVmmein0ueVyxLSCvy6Hr5pXrDz64YMqfK2YOIQgApMH8ygL9XNKxbsrRASDXvd85rFDEVEmeW7PL8qweB0Bc4pWllN4CAC5lYXWRvv7J5XrgN5o0GorIYRhyGobiVxWT55SiZuzJ8zNnR3W6b0SnekfUMTgmKbYd4TAMmaY0NBZ7Ev/sSEgDI0HleVyqLvKqqsir6iKvPC6HRoIRDQfCGgmENRyIaCQYlj8Qlj8Y2+bwuBzyupzyuBzyOI3YNkJ8DlOmCrwulcSDg5I8twq8LuW5nfK5HfK5ncpzxzZN8jxO5XucchqG+kdD6vMHddYf1OBYSG5n7GN97tifJUmhSFSRaCwoCEdNhaNRhSKmwpGoIqapc2tRB0dDOtI1rCPdwzrVN5IMQPI9TtUU+1RZ6JHH5ZAhI3mKajgQ1ll/UH3+oAbHwpKkoUBYQ4GwWvtHL/r/I088fPC6nZpdlqfGinzNrShQY2W+VtSXakFVwbQ2Nr7+CVMt7YP6WXOnfnG4W6f7RtTrD2okGNGRruELPv4/D3TpjuV1aijPv+I/E9NDCAIAaTBejn7hF0MAU/f444/rL//yL9XR0aFVq1bpb/7mb3TddddN+LHbtm3TD37wA+3fv1+StGbNGj3yyCMX/Xgg1RKl6MtmFbMuD2SQxM12zmEBACZjMn0f+R6Xqot8unpOWZqmyg5joYi6hwIqK/Co0Du5p6dDkeh5J70GRkMKR0yV5MeCndI8t4p8bnldDjkucjJrphiGoWWzSrRsVom+eNsiSbHPqa1/VG39Y/IHwwqGY6fJvrvzqN7vGtabx/sIQSxECAIAaTCvMhGCsAkCTNfTTz+tzZs3a+vWrVq7dq2+853vaMOGDTp06JCqq6sv+PidO3fq05/+tG644Qb5fD79z//5P3X77berublZ9fX1FnwGyHX746Xoy+s5hQVkksQmSJhzWAAApJTP7ZxyIOB2OlRR
6FVFoTdFU02Pz+3U/KpCza8qPO/9hzuHYyHIsV79zprZFk2HS8eVAIAZsSD+RfD02VEFp1nABeS6xx57TPfcc482btyopqYmbd26Vfn5+dq+ffuEH//DH/5Qf/RHf6TVq1dryZIl+vu//3tFo1Ht2LEjzZMDMfvP2QQBkDncFKMDAIAZtnZ+uSRlXSeM3RCCAEAaVBd5VeBxKhI1daovdcVigN0Fg0Ht3r1b69evT77P4XBo/fr12rVr16R+j5GREYVCIZWXl6dqTOCiIlFTB9rZBAEykSt+zD1EJwgAAJgh18wtk8OQTvWNqH3g4j0mSC1CEABIA8Mwkr0gx7rpBQGuVE9PjyKRiGpqas57f01NjTo6Oib1e3z5y1/WrFmzzgtSfl0gENDg4OB5b8BMONY9rLFQVPkep+ZVFFg9DoBzuJyJYnS2dgEAwMwo8rmTL3568xjbIFYhBAGANJlXGTuJRS8IYJ1vfetbeuqpp/TjH/9YPp/voh/36KOPqqSkJPnW0NCQxilhZ4lTWE11xSkvbAQwNclNEM5hAQCAGbR2HiexrEYIAgBpMp9ydGDaKisr5XQ61dnZed77Ozs7VVtbe8nH/tVf/ZW+9a1v6Wc/+5lWrlx5yY/dsmWLBgYGkm+nT5+e9uyARCk6kMkSxegRzmEBAIAZdN28CknSm8d7LZ4kdxGCAECazE+ewyIEAa6Ux+PRmjVrzis1T5Scr1u37qKP+4u/+At985vf1Isvvqhrrrnmsn+O1+tVcXHxeW/4/9q78yip6zPf459auqq66Z2GXqABQRHZIwREw3VMOnKjY+LMeOSaGWQ4iVmUnIx9bkZxoU00gl7G4x1DdIJh4tyjwUlGHScSshCZRO0JE5aIgkZlabZuuoHel9q+94/qKmwFpeG31PJ+ndMHu/h19dNfgXqqnnqeB1Z44zBL0YF0dWoxOuOwAACAdeZNKJfHk3g96FhXv9vh5CSKIADgkAsGO0H20gkCnJf6+nqtW7dOTz31lPbs2aOvf/3r6unp0bJlyyRJN998s1asWJG6/qGHHtK9996r9evXa8KECWpublZzc7O6u9nPA2fF40a7jyQ6QabV0AkCpBu/j3FYAADAeiUFeZpSlXgT1FZGYrmCIggAOCRZBGnrHlBnf8TlaIDMtXjxYq1Zs0YrV67U7NmztXPnTm3atCm1LL2pqUlHjx5NXf/4448rHA7rhhtuUHV1depjzZo1bv0IyFEHT/aqayCqgM+riyoL3Q4HwAf4GYcFAABsktoLwnJ0V/jdDgAAckVRKE+jioJq7RrQvtYezaotdTskIGMtX75cy5cvP+3vbdmyZcjn+/fvtz8g4Cwk94FMqS5Sno/3IgHpJlkEicQZhwUAAKx12cRy/ei1/ewFcQnPvgDAQRNTI7EYwwMAueaNI8l9IIzCAtKR30cnCAAAsMcnJyQ6Qf7U0q0TPWGXo8k9FEEAwEHJ5ej7WI4OADmHpehAevN7E0+Po+wEAQAAFhtZGNRFoxMjcdkL4jyKIADgIJajA0BuMubUUvTpY+gEAdKRb3AcVpRxWAAAwAbzJw7uBWEkluMoggCAgyZWJKr+e+kEAYCccqSjX8d7wvJ5PZpSVeR2OABOI7mrh04QAABgh/kXjJTEcnQ3UAQBAAddkByH1dYjY3iCDQC5YtehxCisi0YXKpTnczkaAKeT3AkSidEJAgAArJfsBNnT3KmO3ojL0eQWiiAA4KBx5QXyeT3qi8TU3NnvdjgAAIck94HMYBQWkLb8XhajAwAA+4wuCmn8yAIZI715tMPtcHIKRRAAcFCez6tx5QWSGIkFALlkV7IIMpYiCJCu/IPjsCIUQQAAgE2qS0KSpNauAZcjyS0UQQDAYVNriiVJv32n1eVIAABOMMakOkFYig6kLzpBAACA3UYVUQRxA0UQAHDYn8+oliT9x84jivMkGwCy3tH3LUWfWl3sdjgAzuD9RRB2twEAADtUFAYkSW3dYZcjyS0UQQDAYVdNGa3CoF9HOvq1remk2+EAAGyWHIXFUnQgvSXHYUlSJEYRBAAAWK+iMChJauumE8RJFEEAwGGhPJ8WTauSJP37zsMuRwMAsBujsIDMkOwEkRiJBQAA7DGKIogrKIIAgAs+P7tGkrRxV7MisbjL0QAA7JRaik4RBEhrft+pIkgkTn4GAACsN6ooUQRhJ4izKIIAgAuumDRSI0cEdKInrFffbXM7HACATViKDmQOv/fU0+MY47AAAIANGIflDoogAOACv8+ra2cmFqS/uPOIy9EAAOzS3Nmvtu6wvB6xFB1Icz6vR57BZhA6QQAAgB0qihKL0Y93hxVn/KZjKIIAgEu+MDgS6xdvNqs/EnM5GgCAHXYdSi5FL1J+gKXoQLrLG+wGYScIAACww8gRiU6QaNyooy/icjS5gyIIALjk0nFlGlOar55wTL9565jb4QAAbPDGkU5JjMICMoVvcDl6lHFYAADABgG/V6UFeZKkVkZiOYYiCAC4xOPx6LpZiW6Qf9952OVoAAB2eCO1FJ1RWEAmSC5Hj8QYhwUAAOyR2gvCcnTHUAQBABclR2K9/HYrbZAAkIV2JYsgY+kEATKBf7AThHFYAADALhWFib0gdII4hyIIALhoSlWRLhpdqHA0rh++sk/G8IQbALJFS2e/WrsGBpeiUwQBMoHfl3iKHGEcFgAAsMmoopAkqZVOEMf43Q4AAHKZx+PR31w2Xg0vvql/3PyOjrT36YHrpyuUZ8/yXGMSi7eaO/vV0jmguDEK+r2DHz75fR75vR55PR75vV75fR4F/V6F8nwK+r2pFwYAAB8vuRT9wtGFLEUHMgSdIAAAwG7JTpC27rDLkeQOiiAA4LKbF4xXJBbXgxv36KfbDumdli49sWSOqkvyz/o+YnGjox19OtLeryPtfTrc3qfmjn6190XU0RdRR29YJ3sjOtbVr/7Iuc+4DuV5dXFVsabVFGt6TYkuqS7SyBFBFQR9GhHwK5TnlcfjOef7B4BskhyFxVJ0IHOkdoLE2QkCAADskdoJwjgsx1AEAQCXeTwefXnhRE2pKtbyH2/XHw916LrHXtUtCy/Q5KoiXVxZpOqSkDwej2Jxo5O9YZ3oCeu9Y93aeahdfzzYrjcOd6p7IHrW37OsIE+VxSH5fR6Fo3ENROMaiMQVjccVixtF40bxuFEkZhR+32LQ/khcfzyY+J6n/1mkoN+rgM+rYJ5PAZ9X18yo0t3XTj3fYwKAjHNqKTpFECBT5HkTXa9RxmEBAACbjKII4jiKIACQJj51UYX+Y/mndMu//EFvNXdp1c/fSv1eUdAvv8+j9r6IzrQ2JODzqqY0pOqSfNWU5qu6JKSyEQGV5uepJD9PpQV5Gl0U0uji4LDGbcXjiUJIfySm4z1h7T7SqTePdOrNIx36U0uXuvqj6g3HJEnGJAol/ZG41J8oyqz73T4tuWyCxo0sOPfDAYAMtIsiCJBxfIPjsKJ0ggAAAJuMKkoUQdgJ4hyKIACQRmrLC/TcrZfr/zUe0OuHO/ROS5f2tvao6wNdHqUFeRpblq+ZY0s
1e2ypZtaW6MJRhbbs7PB6PQp5fQrl+VRaENCkUYW6blbNkGvicaO+SEw94agGInGFY4nOkm//x5v6/b4Tem7HIf1d3WTLYwOAdHWss1/HkkvRa4rdDgfAWUrmUnSCAAAAuzAOy3kUQQAgzRQE/PrqlZNSn4ejce0/3iNJKh/s7Ei3BeVer0cjgn6NCA59WLlp3rhEEWT7YX3zMxexLwRAzvjj+5aiFwRIuYFMwWJ0AABgt4qixGL0491hxeNGXi+vldgtvV5FAwB8SMDv1eTKIk2uLFJFYTDtCiAf5epplRoR8KnpRK/+cOCk2+EAgGNeP9QuSZo5ttTVOAAMT2oxeoxxWAAAwB4jRyQ6QaJxo46+iMvR5IbMeSUNAJBxCgJ+fW5GtSTpue2HXI4GAJyz82C7JGlWbamrcQAYntRidDpBAACATQJ+r0oL8iRJrYzEcgRFEACArf7q0rGSpJ+9flT9kZjL0QCA/YwxqaXos8ayFB3IJKcWo1MEAQAA9kntBWE5uiMoggAAbDX/gnKNKc1XV39Uv9rd4nY4AGC7phO9au+NKODzakoVS9GBTJIchxVlHBYAALBRRWFiLwidIM6gCAIAsJXX69FffGKMJEZiAcgNyaXol1QXKeAn3QYyiZ9OEAAA4IBUJ0h32OVIcgPPygAAtvvLSxNFkN++06ZjXf0uRwMA9np9cB8IS9GBzOP3De4EiVEEAQAA9hlVlCiCtDIOyxEUQQAAtps4qlCfGFeqWNzoxZ1H3A4HAGz1+mAnCEvRgcyT7ASJxRmHBQAA7HOqE4QiiBMoggAAHPGXgwvS/237YZcjAQD7RGNxlqIDGSzZCRKhEwQAANhoFEUQR1EEAQA44rqZ1crzebTnaKcOnuh1OxwAsMW7rd3qi8Q0IuDTxFGFbocDYJjyUjtB6AQBAAD2qSgaXIzOOCxHUAQBADiitCCg8SNHSJIOHKcIAiA7vX4w0QUyfUyJfIMvpgLIHD4WowMAAAeMKgxJohPEKRRBAACOGVdeIElqohMEQJb646F2SewDATIVi9EBAIATkp0gx7vDivPmC9tRBAEAOCZZBDlwosflSADAHqml6GNL3Q0EwDnx0wkCAAAcMHJEYidING7U0RdxOZrsRxEEAOCY2sEiCDtBAGSjgWhMbzV3SpJmshQdyEh+32ARJMZOEAAAYJ+A36vSgjxJUisjsWxHEQQA4JjxjMMCkMX2HO1SJGZUPiKgsWX5bocD4BzkJcdh0QkCAABsVlGY6AZpYzm67SiCAAAcM27kYBGExegAstAfD7ZLSnSBeDwsRQcyUXIcVjhKJwgAALBXRWFiLwidIPajCAIAcExtWaII0tkfVUcvMy8BZJfkUvSZ7AMBMlZhyC9J6h6IuhwJAADIdqlOkO6wy5FkP4ogAADH5Ad8GlWUeJBnOTqAbJNcij67ln0gQKYqyU/M5mZBKQAAsFvy9ZFWxmHZjiIIAMBR49gLAiALdQ9E9V5rtyQ6QYBMVpqfGEtBxyoAALDbqU4QiiB2owgCAHAUy9EBZKNdhzpkjDSmND/1ZAZA5qETBGdr7dq1mjBhgkKhkObPn6+tW7ee8dp169Zp4cKFKisrU1lZmerq6j7yegBAbhhFEcQxFEEAAI6qHSyCHKQIAiCL7Dh4UpI0i1FYQEYrLUgUQdr7mM2NM3v22WdVX1+vhoYGbd++XbNmzdKiRYt07Nix016/ZcsW3XTTTXr55ZfV2Nio2tpaXX311Tp8+LDDkQMA0klFUaIDlSKI/SiCAAAclRyHdeA4RRAA2WP7gUQR5NJxZS5HAuB80AmCs/HII4/olltu0bJlyzR16lQ98cQTKigo0Pr16097/dNPP61bb71Vs2fP1pQpU/Tkk08qHo9r8+bNDkcOAEgnowpDktgJ4gSKIAAAR40byTgsANnFGKPtTe2SpEvHUwQBMlnJYCdIfySu/kjM5WiQjsLhsLZt26a6urrUbV6vV3V1dWpsbDyr++jt7VUkElF5ebldYQIAMkCyE+R4d1jxuHE5muxGEQQA4KjkTpAj7X2KxOIuRwMA5+/A8V6d6Akr4PNqWk2x2+EAOA+FAb+8nsR/d9INgtNoa2tTLBZTZWXlkNsrKyvV3Nx8Vvdxxx13qKamZkgh5YMGBgbU2dk55AMAkF1GjkjsBInGDV2oNqMIAgBw1KiioIJ+r+ImUQgBgEy3vSkxCmvG2BIF/T6XowFwPrxeT2okVjsvRsAGq1ev1oYNG/T8888rFAqd8bpVq1appKQk9VFbW+tglAAAJwT83lTe0cpeEFtRBAEAOMrj8bAXBEBW2ZbaB1LqbiAALMFeEHyUiooK+Xw+tbS0DLm9paVFVVVVH/m1a9as0erVq/XLX/5SM2fO/MhrV6xYoY6OjtTHwYMHzzt2AED6GVWU6AZpYy+IrSiCAAAclyyCsBcEQDZI7QNhKTqQFUoKEvO523spguDDAoGA5syZM2SpeXLJ+YIFC874dQ8//LDuv/9+bdq0SXPnzv3Y7xMMBlVcXDzkAwCQfSoKE3kHnSD28rsdAAAg9ySXox+kCAIgw3UPRPV2c2JOO0vRgexAJwg+Tn19vZYuXaq5c+dq3rx5evTRR9XT06Nly5ZJkm6++WaNGTNGq1atkiQ99NBDWrlypZ555hlNmDAhtTuksLBQhYWFrv0cAAD3VRQmOkFa6QSxFUUQAIDj6AQBkC1eP9iuuJHGlOarsvjMs90BZI7S5E6Q3rDLkSBdLV68WK2trVq5cqWam5s1e/Zsbdq0KbUsvampSV7vqcEbjz/+uMLhsG644YYh99PQ0KD77rvPydABAGkmNQ6rm7zDThRBAACOowgCIFuk9oHQBQJkjWQnSCedIPgIy5cv1/Lly0/7e1u2bBny+f79++0PCACQkZJFEDpB7MVOEACA41JFkOO9Msa4HA0AnLvtTSxFB7JNacFgJwhFEAAAYLNRyXFY7ASxFUUQAIDjageLIF0DUZaOAshYxhjtONguiaXoQDZhJwgAAHBKahwWnSC2oggCAHBcKM+nyuLEAz0jsQBkqr1tPWrvjSiU59XUmmK3wwFgkZLUThCKIAAAwF6pcVh0gtjqnIoga9eu1YQJExQKhTR//nxt3br1jNeuW7dOCxcuVFlZmcrKylRXV/eR1wMAcgN7QQBkuuQ+kJljSpXn471FQLagEwQAADglWQQ53j2gWJxx4XYZ9rO1Z599VvX19WpoaND27ds1a9YsLVq0SMeOHTvt9Vu2bNFNN92kl19+WY2NjaqtrdXVV1+tw4cPn3fwAIDMVUsRBECG2zG4D+QT40vdDQSApUoLApIoggAAAPuVFwTk8UhxI53oCbsdTtYadhHkkUce0S233KJly5Zp6tSpeuKJJ1RQUKD169ef9vqnn35at956q2bPnq0pU6boySefVDwe1+bNm887eABA5nr/cnQAyETbD7RLYh8IkG3oBAEAAE7x+7waOSLxBoxW9oLYZlhFkHA4rG3btqmuru7UHXi9qqurU2Nj41ndR29vryKRiMrLy894zcDAgDo7O4d8AACyy/
iRdIIAyFyd/RH96ViXJIogQLYpLThVBDGGsRQAAMBeFYXsBbHbsIogbW1tisViqqysHHJ7ZWWlmpubz+o+7rjjDtXU1AwppHzQqlWrVFJSkvqora0dTpgAgAzAThAAmWxnU7uMSfxblpzjCyA7JDtBYnGj7oGoy9EAAIBsl3w+0UYniG0c3eC4evVqbdiwQc8//7xCodAZr1uxYoU6OjpSHwcPHnQwSgCAE5I7QY529CkcjbscDQAMT3Ip+qXjSt0NBIDlQnk+Bf2Jp8rtvYzEAgAA9koWQegEsY9/OBdXVFTI5/OppaVlyO0tLS2qqqr6yK9ds2aNVq9erV//+teaOXPmR14bDAYVDPKOOgDIZqMKg8rP86kvEtPh9j5dUDHC7ZAA4Kw17j0uSZp3wUiXIwFgh5L8PB3rGlBHX0TMJQAAAHZKFUHoBLHNsDpBAoGA5syZM2SpeXLJ+YIFC874dQ8//LDuv/9+bdq0SXPnzj33aAEAWcPj8aRGYh043uNyNABw9vrCMe1sapckXT6JIgiQjd6/FwQAAMBOowopgtht2OOw6uvrtW7dOj311FPas2ePvv71r6unp0fLli2TJN18881asWJF6vqHHnpI9957r9avX68JEyaoublZzc3N6u7utu6nAABkpNryfEnSoZN9LkcCAGdv24GTCsfiqikJafzIArfDAWCD5F4QiiAAAMBudILYb1jjsCRp8eLFam1t1cqVK9Xc3KzZs2dr06ZNqWXpTU1N8npP1VYef/xxhcNh3XDDDUPup6GhQffdd9/5RQ8AyGhjyxIvHh48yXJ0AJnjtffaJEmXTRopj8fjcjQA7FCSH5DEThAAAGC/VCcIO0FsM+wiiCQtX75cy5cvP+3vbdmyZcjn+/fvP5dvAQDIAWPL6AQBkHleey+xD+TySRUuRwLALnSCAAAAp9AJYr9hj8MCAMAqyU6QQyfoBAGQGbr6I9p1uEOStIB9IEDWSu4Eae8LuxwJAADIdskiSEdfRAPRmMvRZCeKIAAA17ATBECm+e/9JxSLG00YWaAxpfluhwPAJslOkE46QQAAgM1K8vOU50uM2T3ezRsw7EARBADgmmQnyPGesHoGoi5HAwAf77V3E6Ow6AIBsluqE4SdIAAAwGYej+fUXhBGYtmCIggAwDUl+XkqDiXWUx1upxsEQPpL7gNZwD4QIKuxEwQAADipgr0gtqIIAgBwVW15ohvkIHtBAKS5kz1h7WnulCQtmEgnCJDNkkUQOkEAAIATUp0g3RRB7EARBADgqrFl7AUBkBl+v++4jJEuGl2YWl4IIDvRCQIAAJw0ik4QW1EEAQC4qraMThAAmSE5Cuty9oEAWa+0ICCJIggAAHBGsgjSRieILSiCAABcRScIgEzBPhAgdyQ7QboHoorE4i5HAwAAsh2dIPaiCAIAcFVqJ8hJOkEApK9jnf1691i3PB7psonlbocDwGbFIX/qvzvpBgEAADZL7QShCGILiiAAAFeNHRyHRScIgHTWuDfRBTK1ujg1JgdA9vL7vCoKJgohjMQCAAB2S3WCMA7LFhRBAACuSo7D6uiLqLOfFxkApKfX3mUfCJBrSgoSI7HaKYIAAACbVdAJYiuKIAAAV40I+lU+IvGu6kMn6AYBkH5icaPNb7VIkhZeNMrlaAA4JbkXhE4QAABgt2QnSG84pp6BqMvRZB+KIAAA19UOdoOwFwRAOvr9vuNq6w6rtCBPC+gEAXJG6WAnSEcvRRAAAGCvEUG/CgI+SVIbI7EsRxEEAOA69oIASGc/39UsSbp6aqXyfKTPQK6gEwQAADgptReEkViW41kcAMB1Y8sHO0FO0AkCIL3E4kY/fyNRBPncjGqXowHgpJL8xLjOdjpBAACAA0axF8Q2FEEAAK6rpRMEQJr67/0n1NY9oOKQX1dMqnA7HAAOohMEAAA4KdUJwjgsy1EEAQC4buzgTpBD7AQBkGZ+vuuoJOmzU6sU8JM6A7kkuROkvS/sciQAACAXMA7LPjyTAwC4rrb8VCeIMcblaAAgIf6+UVjXzqxyORoATkt2gnTSCQIAABxQwTgs21AEAQC4bkxpohOkeyDK3G0AaWNb00kd6xpQUcivKy5kFBaQa0oHiyDkJgAAwAnJTpA2xmFZjiIIAMB1oTyfRg8+2LMXBEC6eOn1wVFYl1Qq6Pe5HA0Ap7ETBAAAOInF6PahCAIASAvJvSAH2QsCIA0kRmEliiDXzKh2ORoAbihJ7QShCAIAAOzHThD7UAQBAKSFU3tBKIIAcN+OgyfV0jmgwqBfCyczCgvIRe/vBGFnGQAAsFuqCNI9QO5hMYogAIC0kOoEOcE4LADue+n1xEL0uktGMwoLyFGlBQFJUjgaV38k7nI0AAAg240sTOQekZhhHKfFKIIAANJCbVmiE4RxWADc1heO6cU/HpEkfY5RWEDOGhHwyef1SGIvCAAAsF/Q71Pp4DhORmJZiyIIACAtjC1LjsOiEwSAu/6lcb/augc0tixfV1082u1wALjE4/GoND+5FyTscjQAACAXVLAc3RYUQQAAaaG2PDEO69DJXmZfAnBNZ39Ej//ne5Kkb37mIgX8pMtALkvtBemlEwQAANhvVOGpvSCwDs/qAABpobokXx6P1B+Jq62bd1sCcMcPf7dP7b0RTRo1Qn/xiTFuhwPAZSUFyU4QiiAAAMB+qeXodIJYiiIIACAtBPxeVReHJLEXBIA7TvSE9cNX9kmS6j97sfw+UmUg16U6QSiCAAAAB6SKIHSCWIpndgCAtDG2nL0gANzzxH++p+6BqKbVFOtz06vcDgdAGihlHBYAAHBQdUnizaHb9p90OZLsQhEEAJA2xpYl9oLsPtLpciQAck1LZ7+eem2/JOl/X32xvF6PuwEBSAt0ggAAACddN6tGAZ9XfzhwUlv3nXA7nKxBEQQAkDY+PWW0JOlfGvfrWFe/y9EAyCWP/eYdDUTjmju+TH928Si3wwGQJkoKApKk9j72lQEAAPtVFof0V3PGSpK+v+Vdl6PJHhRBAABp49oZ1ZpVW6recEyP/vodt8MBkCN+8oeDeub3TZKk/73oYnk8dIEASDjVCRJ1ORIAAJArvnblRHk90pa3W/XmkQ63w8kKFEEAAGnD4/HonmsvkSRt2NqkP7V0uRwRgGz35O/26ls/fV1xI31x/jhdNnGk2yEBSCPJnSDvHutWNBZ3ORoAAJALxo8coT+fWSNJ+v6W91yOJjtQBAEApJVPTijX/5xWpbiRVm3c43Y4ALKUMUZrfvG2Hngp8e/MLQsv0Hevn+5yVADSzbwLypWf59Oeo51a+eKbMsa4HRIAAMgBX/+zSZKkn+86qn1tPS5Hk/koggAA0s4dn5siv9ejl99u1SvvtLkdDtLQ2rVrNWHCBIVCIc2fP19bt279yOt/8pOfaMqUKQqFQpoxY4Y2btzoUKRIRyd7wrrr+Tf0vZcTM3a/tehi3XXNJYzBAvAhteUF+r//a7Y8HumZ3zfpB7/d63ZIAAAgB1xSXaxPTxmtuJH+6T/pBjlfFEEAAGnngooR+pvLxkuSH
nhpt2Jx3nWJU5599lnV19eroaFB27dv16xZs7Ro0SIdO3bstNe/9tpruummm/SlL31JO3bs0PXXX6/rr79eb7zxhsORw03xuNHv3mnV8me2a/6Dm/XjrU3yeKQHrp+u2666kAIIgDO6elqV7rl2qiRp1c/f0sZdR12OCAAA5ILbrkp0g/zb9kM62tHncjSZzWMyoJ+3s7NTJSUl6ujoUHFxsdvhAAAccLInrP/xf15WV39U186s1tzxZZo0qlAXji5UaUGevB6PPB7Jo8SvcWNkjHSmR7V0eX0zlOez7L5y9fFx/vz5+uQnP6nvfe97kqR4PK7a2lp94xvf0J133vmh6xcvXqyenh797Gc/S9122WWXafbs2XriiSfO6ntafdatXQPq6Auf9/1kquTfU/Ohz4f+BU7+/fYosTPI65F8Xs+pv/8ez+DvSXEj9Udigx9xdfVHdPBEr/Yf79WB4z1680injnb0p+57+phi/d1nJqtuaqXtPy+AzGeM0X0vvqmnGg8o6PfqkRtn64KKESopyFNxyC+/16vecFS94Zh6wlENROLyeDQkX/F6B3/1uJuXXDi6yLL7ytVcxA2cNQDkphv/qVFb953QJ8aV6s8mj9bFVYW6uKpYo4uCqTwj+evHvS7yQW7lI0G/17I3oZ3t46Pfku8GAIDFykYE9M3PXKQHXtqjl14/qpdez/x3XZYW5GnnyqvdDiOjhcNhbdu2TStWrEjd5vV6VVdXp8bGxtN+TWNjo+rr64fctmjRIr3wwgtn/D4DAwMaGBhIfd7Z2Xl+gX/Ak7/bq39ipIrjikJ+/cUnxujGubWaPqbE7XAAZBCPx6OV103ToZN92vzWMd32zHa3QzonPq9H7z14jdthAACAs/SNT1+oJT/cqh1N7drR1O52OJbY/Z1FKgg4W5agCAIASFtf+tQFGj9yhHY0ndS7x7r1Xmu3DhzvVZTxWDmrra1NsVhMlZVD371fWVmpt95667Rf09zcfNrrm5ubz/h9Vq1apW9/+9vnH/AZhPJ8KivIs+3+3XSmv50ffJ9P8p0/ntTnp7kvk7g/Y4ziJvHOpnj81H+bwW9oZOSRR8E8r0J5PoXyvBoR8Ku2vEATRhZo/MgRuqBihOaML7O0GwtAbvF5PfrHmz6he194QzsPtauzL6rOvojCsXjqmvw8nwoCPgX93sF/vxL/RsUH35WZ+Pcs8fkH/91z4s2YPm+atMYCAICzsvCiUfrp1xZoe9NJvd3crbdbOvVOS7cGovGP/2KkUAQBAKQtj8ejz06t1GffN64mGosrHIufehE0nnhxwetNjMVJtoG+3wdbQSmh4OOsWLFiSPdIZ2enamtrLbv/2z87Wbd/drJl9wcAcMaIoF+PLJ495Lb+SEzRuFFBnk9eigwAAMBicyeUa+6E8tTnsbjRQDQ2+CYLk/r1fF8XcSqLyXfhjWkUQQAAGcXv88rv87odBlxSUVEhn8+nlpaWIbe3tLSoqqrqtF9TVVU1rOslKRgMKhgMnn/AAICsR4cZAABwks/rcXycVKbjVSQAAJAxAoGA5syZo82bN6dui8fj2rx5sxYsWHDar1mwYMGQ6yXpV7/61RmvBwAAAAAA2YOSEQAAyCj19fVaunSp5s6dq3nz5unRRx9VT0+Pli1bJkm6+eabNWbMGK1atUqS9M1vflNXXnml/uEf/kHXXnutNmzYoD/84Q/6wQ9+4OaPAQAAAAAAHEARBAAAZJTFixertbVVK1euVHNzs2bPnq1Nmzallp83NTXJ6z3V7Hr55ZfrmWee0T333KO77rpLF110kV544QVNnz7drR8BAAAAAAA4xGPMB9eipJ/Ozk6VlJSoo6NDxcXFbocDAEBa4PHROZw1AAAfxuOjczhrAAA+7GwfH9kJAgAAAAAAAAAAshJFEAAAAAAAAAAAkJUoggAAAAAAAAAAgKxEEQQAAAAAAAAAAGQliiAAAAAAAAAAACArUQQBAAAAAAAAAABZiSIIAAAAAAAAAADIShRBAAAAAAAAAABAVqIIAgAAAAAAAAAAshJFEAAAAAAAAAAAkJUoggAAAAAAAAAAgKxEEQQAAAAAAAAAAGQliiAAAAAAAAAAACArUQQBAAAAAAAAAABZiSIIAAAAAAAAAADIShRBAAAAAAAAAABAVqIIAgAAAAAAAAAAshJFEAAAAAAAAAAAkJX8bgdwNowxkqTOzk6XIwEAIH0kHxeTj5OwD7kIAAAfRi7iHHIRAAA+7GxzkYwognR1dUmSamtrXY4EAID009XVpZKSErfDyGrkIgAAnBm5iP3IRQAAOLOPy0U8JgPeshGPx3XkyBEVFRXJ4/Gc8/10dnaqtrZWBw8eVHFxsYUR5ibO01qcp7U4T+twltay8jyNMerq6lJNTY28XiZc2olcJD1xntbiPK3FeVqHs7QWuUhmIhdJT5yntThPa3Ge1uEsreVGLpIRnSBer1djx4617P6Ki4v5A2shztNanKe1OE/rcJbWsuo8edelM8hF0hvnaS3O01qcp3U4S2uRi2QWcpH0xnlai/O0FudpHc7SWk7mIrxVAwAAAAAAAAAAZCWKIAAAAAAAAAAAICvlVBEkGAyqoaFBwWDQ7VCyAudpLc7TWpyndThLa3GeuY3//9biPK3FeVqL87QOZ2ktzjO38f/fWpyntThPa3Ge1uEsreXGeWbEYnQAAAAAAAAAAIDhyqlOEAAAAAAAAAAAkDsoggAAAAAAAAAAgKxEEQQAAAAAAAAAAGQliiAAAAAAAAAAACArZV0RZO3atZowYYJCoZDmz5+vrVu3fuT1P/nJTzRlyhSFQiHNmDFDGzdudCjSzDCc81y3bp0WLlyosrIylZWVqa6u7mPPP9cM989n0oYNG+TxeHT99dfbG2AGGe5Ztre367bbblN1dbWCwaAmT57M3/f3Ge55Pvroo7r44ouVn5+v2tpa3X777erv73co2vT229/+Vtddd51qamrk8Xj0wgsvfOzXbNmyRZdeeqmCwaAuvPBC/ehHP7I9TtiHXMRa5CLWIhexDrmItchFrEMuAnIRa5GLWItcxFrkI9YhF7FOWuYiJots2LDBBAIBs379evPmm2+aW265xZSWlpqWlpbTXv/qq68an89nHn74YbN7925zzz33mLy8PLNr1y6HI09Pwz3PL37xi2bt2rVmx44dZs+ePeZv//ZvTUlJiTl06JDDkaen4Z5n0r59+8yYMWPMwoULzRe+8AVngk1zwz3LgYEBM3fuXHPNNdeYV155xezbt89s2bLF7Ny50+HI09Nwz/Ppp582wWDQPP3002bfvn3mF7/4hamurja33367w5Gnp40bN5q7777bPPfcc0aSef755z/y+r1795qCggJTX19vdu/ebR577DHj8/nMpk2bnAkYliIXsRa5iLXIRaxDLmItchFrkYvkNnIRa5GLWItcxFrkI9YhF7FWOuYiWVUEmTdvnrnttttSn8diMVNTU2NWrVp12utvvPFGc+211w65bf78+earX/2qrXFmiuGe5wdFo1FTVFRknnrqKbtCzCjncp7R
aNRcfvnl5sknnzRLly7lwX7QcM/y8ccfNxMnTjThcNipEDPKcM/ztttuM5/+9KeH3FZfX2+uuOIKW+PMRGfzYP/3f//3Ztq0aUNuW7x4sVm0aJGNkcEu5CLWIhexFrmIdchFrEUuYh9ykdxDLmItchFrkYtYi3zEOuQi9kmXXCRrxmGFw2Ft27ZNdXV1qdu8Xq/q6urU2Nh42q9pbGwccr0kLVq06IzX55JzOc8P6u3tVSQSUXl5uV1hZoxzPc/vfOc7Gj16tL70pS85EWZGOJezfPHFF7VgwQLddtttqqys1PTp0/Xggw8qFos5FXbaOpfzvPzyy7Vt27ZUa+jevXu1ceNGXXPNNY7EnG14LMoe5CLWIhexFrmIdchFrEUu4j4ei7IHuYi1yEWsRS5iLfIR65CLuM+JxyK/Zffksra2NsViMVVWVg65vbKyUm+99dZpv6a5ufm01zc3N9sWZ6Y4l/P8oDvuuEM1NTUf+kOci87lPF955RX98Ic/1M6dOx2IMHOcy1nu3btXv/nNb/TXf/3X2rhxo959913deuutikQiamhocCLstHUu5/nFL35RbW1t+tSnPiVjjKLRqL72ta/prrvuciLkrHOmx6LOzk719fUpPz/fpcgwXOQi1iIXsRa5iHXIRaxFLuI+cpHsQS5iLXIRa5GLWIt8xDrkIu5zIhfJmk4QpJfVq1drw4YNev755xUKhdwOJ+N0dXVpyZIlWrdunSoqKtwOJ+PF43GNHj1aP/jBDzRnzhwtXrxYd999t5544gm3Q8tIW7Zs0YMPPqjvf//72r59u5577jm99NJLuv/++90ODQBSyEXOD7mItchFrEUuAiATkIucH3IR65GPWIdcJPNkTSdIRUWFfD6fWlpahtze0tKiqqqq035NVVXVsK7PJedynklr1qzR6tWr9etf/1ozZ860M8yMMdzzfO+997R//35dd911qdvi8bgkye/36+2339akSZPsDTpNncufzerqauXl5cnn86Vuu+SSS9Tc3KxwOKxAIGBrzOnsXM7z3nvv1ZIlS/TlL39ZkjRjxgz19PToK1/5iu6++255vdTXh+NMj0XFxcW88zLDkItYi1zEWuQi1iEXsRa5iPvIRbIHuYi1yEWsRS5iLfIR65CLuM+JXCRr/o8EAgHNmTNHmzdvTt0Wj8e1efNmLViw4LRfs2DBgiHXS9KvfvWrM16fS87lPCXp4Ycf1v33369NmzZp7ty5ToSaEYZ7nlOmTNGuXbu0c+fO1MfnP/95XXXVVdq5c6dqa2udDD+tnMufzSuuuELvvvtuKmGSpD/96U+qrq7O2Qf5pHM5z97e3g89oCeTqMTOKwwHj0XZg1zEWuQi1iIXsQ65iLXIRdzHY1H2IBexFrmItchFrEU+Yh1yEfc58lhk2Yr1NLBhwwYTDAbNj370I7N7927zla98xZSWlprm5mZjjDFLliwxd955Z+r6V1991fj9frNmzRqzZ88e09DQYPLy8syuXbvc+hHSynDPc/Xq1SYQCJif/vSn5ujRo6mPrq4ut36EtDLc8/ygpUuXmi984QsORZvehnuWTU1NpqioyCxfvty8/fbb5mc/+5kZPXq0eeCBB9z6EdLKcM+zoaHBFBUVmR//+Mdm79695pe//KWZNGmSufHGG936EdJKV1eX2bFjh9mxY4eRZB555BGzY8cOc+DAAWOMMXfeeadZsmRJ6vq9e/eagoIC861vfcvs2bPHrF271vh8PrNp0ya3fgScB3IRa5GLWItcxDrkItYiF7EWuUhuIxexFrmItchFrEU+Yh1yEWulYy6SVUUQY4x57LHHzLhx40wgEDDz5s0z//Vf/5X6vSuvvNIsXbp0yPX/+q//aiZPnmwCgYCZNm2aeemllxyOOL0N5zzHjx9vJH3oo6GhwfnA09Rw/3y+Hw/2Qw33LF977TUzf/58EwwGzcSJE813v/tdE41GHY46fQ3nPCORiLnvvvvMpEmTTCgUMrW1tebWW281J0+edD7wNPTyyy+f9t/C5BkuXbrUXHnllR/6mtmzZ5tAIGAmTpxo/vmf/9nxuGEdchFrkYtYi1zEOuQi1iIXsQ65CMhFrEUuYi1yEWuRj1iHXMQ66ZiLeIyhRwcAAAAAAAAAAGSfrNkJAgAAAAAAAAAA8H4UQQAAAAAAAAAAQFaiCAIAAAAAAAAAALISRRAAAAAAAAAAAJCVKIIAAAAAAAAAAICsRBEEAAAAAAAAAABkJYogAAAAAAAAAAAgK1EEAQAAAAAAAAAAWYkiCAAAAAAAAAAAyEoUQQAAAAAAAAAAQFaiCAIAAAAAAAAAALISRRAAAAAAAAAAAJCV/j9ru8hd/78s9AAAAABJRU5ErkJggg==",
"text/plain": [
"
"
]
},
"metadata": {},
"output_type": "display_data"
+ },
+ {
+ "ename": "",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[1;31mThe Kernel crashed while executing code in the the current cell or a previous cell. Please review the code in the cell(s) to identify a possible cause of the failure. Click here for more info. View Jupyter log for further details."
+ ]
}
],
"source": [
@@ -215,7 +281,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.6 (main, Nov 14 2022, 16:10:14) [GCC 11.3.0]"
+ "version": "3.9.16"
},
"orig_nbformat": 4,
"vscode": {
diff --git a/MindFlow/applications/cfd/sod/solve_sod.py b/MindFlow/applications/cfd/sod/solve_sod.py
index 49c0576b300862bf709b49d4fb945c097f6971db..9f6fb6eb0c8902102bef4cf72d2bfb815cfe70f3 100644
--- a/MindFlow/applications/cfd/sod/solve_sod.py
+++ b/MindFlow/applications/cfd/sod/solve_sod.py
@@ -13,20 +13,60 @@
# limitations under the License.
# ==============================================================================
"""solve sod tube flow"""
+import argparse
+
from mindspore import context
-from mindflow import load_yaml_config, vis_1d
-from mindflow import cfd
+from src.ic import sod_ic_1d
+
+from mindflow import cfd, load_yaml_config, vis_1d
from mindflow.cfd.runtime import RunTime
from mindflow.cfd.simulator import Simulator
-from src.ic import sod_ic_1d
+parser = argparse.ArgumentParser(description="Sod compute")
+parser.add_argument(
+ "--mode",
+ type=str,
+ default="GRAPH",
+ choices=["GRAPH", "PYNATIVE"],
+ help="Running in GRAPH_MODE OR PYNATIVE_MODE",
+)
+parser.add_argument(
+ "--save_graphs",
+ type=bool,
+ default=False,
+ choices=[True, False],
+ help="Whether to save intermediate compilation graphs",
+)
+parser.add_argument("--save_graphs_path", type=str, default="./graphs")
+parser.add_argument(
+ "--device_target",
+ type=str,
+ default="GPU",
+ choices=["GPU", "Ascend"],
+ help="The target device to run, support 'Ascend', 'GPU'",
+)
+parser.add_argument("--device_id", type=int, default=0, help="ID of the target device")
+parser.add_argument("--config_file_path", type=str, default="./numeric.yaml")
+parser.add_argument("--reconstructor", type=str, choices=["WENO3", "WENO5", "WENO7"], default="WENO5")
+parser.add_argument("--riemann_computer", type=str, choices=["HLLC", "Roe", "Rusanov"], default="Roe")
+
+args = parser.parse_args()
-context.set_context(device_target="GPU", device_id=3)
+context.set_context(
+ mode=context.GRAPH_MODE if args.mode.upper().startswith("GRAPH") else context.PYNATIVE_MODE,
+ save_graphs=args.save_graphs,
+ save_graphs_path=args.save_graphs_path,
+ device_target=args.device_target,
+ device_id=args.device_id,
+)
+print(f"Running in {args.mode.upper()} mode, using device id: {args.device_id}.")
-config = load_yaml_config('numeric.yaml')
+config = load_yaml_config(args.config_file_path)
+config["space_solver"]["convective_flux"]["reconstructor"] = args.reconstructor
+config["space_solver"]["convective_flux"]["riemann_computer"] = args.riemann_computer
simulator = Simulator(config)
-runtime = RunTime(config['runtime'], simulator.mesh_info, simulator.material)
+runtime = RunTime(config["runtime"], simulator.mesh_info, simulator.material)
mesh_x, _, _ = simulator.mesh_info.mesh_xyz()
pri_var = sod_ic_1d(mesh_x)
@@ -39,4 +79,4 @@ while runtime.time_loop(pri_var):
runtime.advance()
pri_var = cfd.cal_pri_var(con_var, simulator.material)
-vis_1d(pri_var, 'sod.jpg')
+vis_1d(pri_var, "sod.jpg")
diff --git a/MindFlow/applications/data_driven/airfoil/2D_steady/README.MD b/MindFlow/applications/data_driven/airfoil/2D_steady/README.MD
index 0f586ad93d51ee74d9c456f42e0641f2765b2c4c..8dfd0bb663256a7457674df38f2417f73de36b77 100644
--- a/MindFlow/applications/data_driven/airfoil/2D_steady/README.MD
+++ b/MindFlow/applications/data_driven/airfoil/2D_steady/README.MD
@@ -60,6 +60,8 @@ Aiming at the technical difficulties mentioned above, we designed an AI model-ba

+## Results Display
+
When the airfoil geometry changes, the surface pressure distribution, flow field distribution, and error statistics predicted by AI and CFD are as follows:
! [airfoil.gif](images/airfoil.gif)
@@ -68,4 +70,11 @@ When the angle of attack of the incoming flow changes, the surface pressure dist
! [aoa_var.gif](images/aoa_var.gif)
When the incoming Mach number changes, the surface pressure distribution, flow field distribution, and error statistics predicted by AI and CFD are as follows:
-! [Ma_var.gif](images/Ma_var.gif)
\ No newline at end of file
+! [Ma_var.gif](images/Ma_var.gif)
+
+## Contributor
+
+gitee id: [Brian-K](https://gitee.com/Brian-K)
+
+email: brian_k2023@163.com
+
diff --git a/MindFlow/applications/data_driven/airfoil/2D_steady/README_CN.md b/MindFlow/applications/data_driven/airfoil/2D_steady/README_CN.md
index c62c58889c7f9c52c6e39d2fc767067d1d6d20c9..b70094b013cbfb7f4667f5890c250f9f5a6ad467 100644
--- a/MindFlow/applications/data_driven/airfoil/2D_steady/README_CN.md
+++ b/MindFlow/applications/data_driven/airfoil/2D_steady/README_CN.md
@@ -60,6 +60,8 @@

+## 结果展示
+
翼型几何形状发生改变时,AI和CFD预测的表面压力分布,流场分布及其误差统计如下:

@@ -70,4 +72,10 @@
来流马赫数发生改变时,AI和CFD预测的表面压力分布,流场分布及其误差统计如下:
-
\ No newline at end of file
+
+
+## 贡献者
+
+gitee id: [Brian-K](https://gitee.com/Brian-K)
+
+email: brian_k2023@163.com
\ No newline at end of file
diff --git a/MindFlow/applications/data_driven/airfoil/2D_steady/train.py b/MindFlow/applications/data_driven/airfoil/2D_steady/train.py
index fb9874f958c3839a1f38eeb074ea93fa6deeb116..d2841f912fd9c42a81454cc21948bc1191f28c58 100644
--- a/MindFlow/applications/data_driven/airfoil/2D_steady/train.py
+++ b/MindFlow/applications/data_driven/airfoil/2D_steady/train.py
@@ -40,28 +40,6 @@ from src import AirfoilDataset, plot_u_and_cp, get_ckpt_summ_dir, plot_u_v_p, ca
set_seed(0)
np.random.seed(0)
-parser = argparse.ArgumentParser(description='Airfoil 2D_steady Simulation')
-parser.add_argument("--save_graphs", type=bool, default=False, choices=[True, False],
- help="Whether to save intermediate compilation graphs")
-parser.add_argument("--context_mode", type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"],
- help="Support context mode: 'GRAPH', 'PYNATIVE'")
-parser.add_argument('--train_mode', type=str, default='train', choices=["train", "eval", "finetune"],
- help="Support run mode: 'train', 'eval', 'finetune'")
-parser.add_argument('--device_id', type=int, default=6, help="ID of the target device")
-parser.add_argument('--device_target', type=str, default='Ascend', choices=["GPU", "Ascend"],
- help="The target device to run, support 'Ascend', 'GPU'")
-parser.add_argument("--config_file_path", type=str, default="./config.yaml")
-parser.add_argument("--save_graphs_path", type=str, default="./graphs")
-args = parser.parse_args()
-
-context.set_context(mode=context.GRAPH_MODE if args.context_mode.upper().startswith("GRAPH") else context.PYNATIVE_MODE,
- save_graphs=args.save_graphs,
- save_graphs_path=args.save_graphs_path,
- device_target=args.device_target,
- device_id=args.device_id)
-
-use_ascend = (args.device_target == "Ascend")
-
def train():
'''Train and evaluate the network'''
@@ -194,6 +172,29 @@ def train():
if __name__ == '__main__':
print(f'pid: {os.getpid()}')
print(datetime.datetime.now())
+
+ parser = argparse.ArgumentParser(description='Airfoil 2D_steady Simulation')
+ parser.add_argument("--save_graphs", type=bool, default=False, choices=[True, False],
+ help="Whether to save intermediate compilation graphs")
+ parser.add_argument("--context_mode", type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"],
+ help="Support context mode: 'GRAPH', 'PYNATIVE'")
+ parser.add_argument('--train_mode', type=str, default='train', choices=["train", "eval", "finetune"],
+ help="Support run mode: 'train', 'eval', 'finetune'")
+ parser.add_argument('--device_id', type=int, default=0, help="ID of the target device")
+ parser.add_argument('--device_target', type=str, default='Ascend', choices=["GPU", "Ascend"],
+ help="The target device to run, support 'Ascend', 'GPU'")
+ parser.add_argument("--config_file_path", type=str, default="./config.yaml")
+ parser.add_argument("--save_graphs_path", type=str, default="./graphs")
+ args = parser.parse_args()
+
+ context.set_context(mode=context.GRAPH_MODE if args.context_mode.upper().startswith("GRAPH") \
+ else context.PYNATIVE_MODE,
+ save_graphs=args.save_graphs,
+ save_graphs_path=args.save_graphs_path,
+ device_target=args.device_target,
+ device_id=args.device_id)
+
+ use_ascend = (args.device_target == "Ascend")
print(f'use_ascend : {use_ascend}')
print(f'device_id: {context.get_context("device_id")}')
train()
diff --git a/MindFlow/applications/data_driven/burgers_fno/FNO1D_CN.ipynb b/MindFlow/applications/data_driven/burgers_fno/FNO1D_CN.ipynb
index 80f8d470ea129562fd5742bca6ab0b380da932a9..b29e342a54340a47ef35a642b5bc7f9a78a6c3a1 100644
--- a/MindFlow/applications/data_driven/burgers_fno/FNO1D_CN.ipynb
+++ b/MindFlow/applications/data_driven/burgers_fno/FNO1D_CN.ipynb
@@ -289,7 +289,9 @@
}
},
"source": [
- "## Optimizer and Loss Function"
+ "## 优化器与损失函数\n",
+ "\n",
+ "使用相对均方根误差作为网络训练损失函数:"
]
},
{
@@ -326,9 +328,10 @@
}
},
"source": [
- "## 优化器与损失函数\n",
+ "## 模型训练\n",
"\n",
- "使用相对均方根误差作为网络训练损失函数:"
+ "使用 **MindSpore version >= 2.0.0**, 我们可以使用函数式编程来训练神经网络。 `MindFlow` 为非稳态问题 `UnsteadyFlowWithLoss` 提供了一个训练接口,用于模型训练和评估."
+
]
},
{
diff --git a/MindFlow/applications/data_driven/burgers_kno/train.py b/MindFlow/applications/data_driven/burgers_kno/train.py
index 36f5edebe117e7c84286b271dd2038b9d720fc92..d0d857d602cf058f02bc85494203b34b2de85e79 100644
--- a/MindFlow/applications/data_driven/burgers_kno/train.py
+++ b/MindFlow/applications/data_driven/burgers_kno/train.py
@@ -132,14 +132,17 @@ def main():
time_beg = time.time()
l_recons_train = 0.0
l_pred_train = 0.0
+ l_train = 0.0
for _ in range(train_size):
- _, l_recons, l_pred = train_sink()
+ l_full, l_recons, l_pred = train_sink()
l_recons_train += l_recons.asnumpy()
l_pred_train += l_pred.asnumpy()
+ l_train += l_full.asnumpy()
l_recons_train = l_recons_train / train_size
l_pred_train = l_pred_train / train_size
- print(f"epoch: {epoch} epoch time: {(time.time() - time_beg):>8f},"
- f" recons loss: {l_recons_train:>8f}, pred loss: {l_pred_train:>8f}")
+ l_train = l_train / train_size
+ print(f"epoch: {epoch} epoch time: {(time.time() - time_beg):>8f}s,"
+ f" recons loss: {l_recons_train:>8f}, pred loss: {l_pred_train:>8f}, Total loss: {l_train:>8f}")
if epoch % config['eval_interval'] == 0:
l_recons_eval = 0.0
diff --git a/MindFlow/applications/data_driven/cae_lstm/README.md b/MindFlow/applications/data_driven/cae_lstm/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0545d178a386bc79518e90511d7831ab4a21e9ed
--- /dev/null
+++ b/MindFlow/applications/data_driven/cae_lstm/README.md
@@ -0,0 +1,105 @@
+[简体中文](README_CN.md) | ENGLISH
+
+# CAE-LSTM reduced-order model
+
+## Overview
+
+### Background
+
+In order to effectively reduce the design cost and cycle time of using CFD methods, the reduced-order model (ROM) has gained wide attention in recent years. For complex compressible flows, using linear methods such as Proper Orthogonal Decomposition (POD) for flow field dimensionality reduction requires a large number of modes to ensure the reconstruction accuracy. It has been shown that the modes number can be effectively reduced by using nonlinear dimensionality reduction methods. Convolutional Autoencoder (CAE) is a kind of neural network composed of encoder and decoder, which can realize data dimensionality reduction and recon-struction, and can be regarded as a nonlinear extension of POD method. CAE is used for nonlinear dimension-ality reduction, and Long Short-Term Memory (LSTM) is used for time evolution. The CAE-LSTM can obtain high reconstruction and prediction accuracy on the premise of using less latents for unsteady compressible flows.
+
+### Model structure
+
+The basic framework of CAE-LSTM is mainly based on [paper](https://doi.org/10.13700/j.bh.1001-5965.2022.0085). It consists of CAE and LSTM, where the encoder in CAE reduces the dimensionality of the time series flow field to achieve feature extraction, LSTM learns low dimensional spatiotemporal features and makes predictions, and the decoder in CAE realizes flow field reconstruction.
+
++ Input:Input the flow field for a period of time
++ Compression:Extract high-dimensional spatiotemporal flow characteristics by dimensionality reduction of the flow field using the encoder of CAE
++ Evolution:Learning the evolution of spatiotemporal characteristics of low dimensional spatial flow fields through LSTM and predicting the next moment
++ Reconstruction:Restore the predicted low-dimensional features of the flow field to high-dimensional space through the decoder of CAE
++ Output:Output the predicted results of the transient flow field at the next moment
+
+
+
+### Dataset
+
+Source: Numerical simulation flow field data of one-dimensional Sod shock tube, Shu-Osher problem, Tow-dimensional Riemann problem and Kelvin-Helmholtz instability problem, provided by Professor Yu Jian from the School of Aeronautic Science and Engineering, Beihang University
+
+Establishment method: The calculation status and establishment method of the dataset can be found in [paper](https://doi.org/10.13700/j.bh.1001-5965.2022.0085)
+
+Data description:
+Sod shock tube: The coordinate range is \[0, 1\], and there is a thin film at x=0.5 in the middle. At the initial moment, remove the thin film in the middle of the shock tube and study the changes in gas density in the shock tube. The calculation time t ranges from \[0, 0.2\] and is divided into an average of 531 time steps. A total of 531 flow field snapshots, each with a matrix size of 256.
+
+Shu-Osher problem: The coordinate range is \[-5, 5\], and the calculation time t ranges from \[0, 1.8] and is divided into an average of 2093 time steps. A total of 2093 flow field snapshots, each with a matrix size of 512.
+
+Riemann problem: The coordinate range is \[0, 1\], and the calculation time t ranges from \[0, 0.25]. A total of 1250 flow field snapshots, each with a matrix size of (128, 128).
+
+Kelvin-Helmholtz instability problem: The coordinate range is \[-0.5, 0.5\], and the calculation time t ranges from \[0, 1.5] and is divided into an average of 1786 time steps. A total of 1786 flow field snapshots, each with a matrix size of (256, 256).
+
+The download address for the dataset is: [data_driven/cae-lstm/dataset](https://download.mindspore.cn/mindscience/mindflow/dataset/applications/data_driven/cae-lstm)
+
+## QuickStart
+
+### Run Option 1: Call `cae_train.py` and `lstm_train.py` from command line to start train cae and lstm network, respectively
+
++ Train the CAE network:
+
+`python -u cae_train.py --case sod --mode GRAPH --save_graphs False --save_graphs_path ./graphs --device_target GPU --device_id 0 --config_file_path ./config.yaml`
+
++ Train the LSTM network:
+
+`python -u lstm_train.py --case sod --mode GRAPH --save_graphs False --save_graphs_path ./graphs --device_target GPU --device_id 0 --config_file_path ./config.yaml`
+
+where:
+`--case` indicates the case to run. You can choose 'sod', 'shu_osher', riemann' or 'kh'. Default 'sod',where 'sod' and 'shu_osher' are one dimension cases, 'riemann' and 'kh' are two dimension cases
+
+`--config_file_path` indicates the path of the parameter file. Default './config.yaml'.
+
+`--device_target` indicates the computing platform. You can choose 'Ascend' or 'GPU'. Default 'Ascend'.
+
+`--device_id` indicates the index of NPU or GPU. Default 0.
+
+`--mode` is the running mode. 'GRAPH' indicates static graph mode. 'PYNATIVE' indicates dynamic graph mode. You can refer to [MindSpore official website](https://www.mindspore.cn/docs/zh-CN/master/design/dynamic_graph_and_static_graph.html) for details.Default 'GRAPH'.
+
+`--save_graphs` indicates whether to save the computational graph. Default 'False'.
+
+`--save_graphs_path` indicates the path to save the computational graph. Default './graphs'.
+
+### Run Option 2: Run Jupyter Notebook
+
+You can use [Chinese](./cae_lstm_CN.ipynb) or [English](./cae_lstm.ipynb) Jupyter Notebook to run the training and evaluation code line-by-line.
+
+## Results
+
+The following are the actual flow field, CAE-LSTM prediction results, and prediction errors of the four cases.
+
+The first two flow field results for each case show the variation of density at different x positions in the flow field over time, while the third error curve shows the variation of the average relative error between the CAE-LSTM flow field and the real flow field label over time. The overall prediction time error meet the accuracy requirements of flow field prediction.
+
+Sod shock tube:
+
+
+
+
+
+Shu-Osher problem:
+
+
+
+
+
+Riemann problem:
+
+
+
+
+
+Kelvin-Helmholtz instability problem:
+
+
+
+
+
+## Contributor
+
+gitee id: [xiaoruoye](https://gitee.com/xiaoruoye)
+
+email: 1159053026@qq.com
diff --git a/MindFlow/applications/data_driven/cae_lstm/README_CN.md b/MindFlow/applications/data_driven/cae_lstm/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..3a73e4e819e570ba239d0c184ca8dd80b200a820
--- /dev/null
+++ b/MindFlow/applications/data_driven/cae_lstm/README_CN.md
@@ -0,0 +1,105 @@
+[ENGLISH](README.md) | 简体中文
+
+# CAE-LSTM降阶模型
+
+## 概述
+
+### 背景
+
+降阶模型可有效降低使用CFD方法的设计成本和周期。对于复杂的可压缩流动,使用POD等线性方法进行流场降维,需要大量的模态才能保证流场重建的精度,而采用非线性降维方法能够有效减少所需模态数。卷积自编码器(CAE)是一种由编码器和解码器组成的神经网络,能够实现数据降维和重构,可看作是POD方法的非线性拓展。采用CAE进行流场数据的非线性降维,同时使用LSTM进行流场状态的时间演化。对于非定常可压缩流动,“CAE-LSTM”降阶模型能够在使用较少自由变量数的前提下获得较高的重构和预测精度。
+
+### 模型结构
+
+CAE-LSTM的基本框架主要基于[论文](https://doi.org/10.13700/j.bh.1001-5965.2022.0085) ,其由CAE和LSTM组成,其中CAE中的编码器降低时间序列流场的维数,实现特征提取,LSTM学习低维时空特征并进行预测,CAE中的解码器实现流场重建。
+
++ 输入:输入一段时间的流场;
++ 压缩:通过CAE的编码器对流场进行降维,提取高维时空流动特征;
++ 演化:通过LSTM学习低维空间流场时空特征的演变,预测下一时刻;
++ 重建:通过CAE的解码器将预测的流场低维特征恢复到高维空间;
++ 输出:输出对下一时刻瞬态流场的预测结果。
+
+
+
+### 数据集
+
+数据集来源:一维Sod激波管、Shu-Osher问题和二维黎曼问题、亥姆霍兹不稳定性问题的数值仿真流场数据,由北京航空航天大学航空科学与工程学院于剑副教授团队提供
+
+数据集建立方法:数据集计算状态与建立方法见[论文](https://doi.org/10.13700/j.bh.1001-5965.2022.0085)
+
+数据说明:
+Sod激波管:坐标x范围为[0, 1],中间x=0.5处有一薄膜。在初始时刻,将激波管中间的薄膜撤去,研究激波管中气体密度的变化情况。计算时间t范围为[0, 0.2],平均分成531个时间步。共531张流场快照,每张快照矩阵尺寸为128;
+
+Shu-Osher问题:坐标x范围为[-5, 5],计算时间t范围为[0, 1.8],平均分成2093个时间步。共2093张流场快照,每张快照矩阵尺寸为512;
+
+二维黎曼问题:坐标x, y范围为[0, 1],计算时间t范围为[0, 0.25],平均分成1250个时间步。共1250张流场快照,每张快照矩阵尺寸为(128, 128)。
+
+二维开尔文-亥姆霍兹不稳定性问题:坐标x, y范围为[-0.5, 0.5],计算时间t范围为[0, 1.5],分成1786个时间步。共1786张流场快照,每张快照矩阵尺寸为(256, 256)。
+
+数据集的下载地址为:[data_driven/cae-lstm/dataset](https://download.mindspore.cn/mindscience/mindflow/dataset/applications/data_driven/cae-lstm)
+
+## 快速开始
+
+### 训练方式一:在命令行中分别调用`cae_train.py`和`lstm_train.py`开始训练CAE和LSTM网络
+
++ 训练CAE网络:
+
+`python -u cae_train.py --case sod --mode GRAPH --save_graphs False --save_graphs_path ./graphs --device_target GPU --device_id 0 --config_file_path ./config.yaml`
+
++ 训练LSTM网络:
+
+`python -u lstm_train.py --case sod --mode GRAPH --save_graphs False --save_graphs_path ./graphs --device_target GPU --device_id 0 --config_file_path ./config.yaml`
+
+其中,
+`--case`表示运行的算例,可以选择'sod'、'shu_osher'、'riemann'和'kh',默认值'sod',其中'sod'和'shu_osher'为一维算例,'riemann'和'kh'为二维算例
+
+`--config_file_path`表示配置文件的路径,默认值'./config.yaml'
+
+`--device_target`表示使用的计算平台类型,可以选择'Ascend'或'GPU',默认值'GPU'
+
+`--device_id`表示使用的计算卡编号,可按照实际情况填写,默认值 0
+
+`--mode`表示运行的模式,'GRAPH'表示静态图模式, 'PYNATIVE'表示动态图模式,默认值'GRAPH',详见[MindSpore 官网](https://www.mindspore.cn/docs/zh-CN/master/design/dynamic_graph_and_static_graph.html)
+
+`--save_graphs`表示是否保存计算图,默认值'False'
+
+`--save_graphs_path`表示计算图保存的路径,默认值'./graphs'
+
+### 训练方式二:运行Jupyter Notebook
+
+您可以使用[中文版](./cae_lstm_CN.ipynb)或[英文版](./cae_lstm.ipynb) Jupyter Notebook 逐行运行训练和验证代码。
+
+## 结果展示
+
+以下分别为四个算例的真实流场,CAE-LSTM预测结果和预测误差。
+
+其中每个算例的前两个流场结果展现了流场中不同位置的密度随时间的变化情况,第三个误差曲线展现了CAE-LSTM流场与真实流场label的平均相对误差随时间的变化情况。整个预测时间误差都较小,满足流场预测精度需求。
+
+Sod激波管:
+
+
+
+
+
+Shu-Osher问题:
+
+
+
+
+
+黎曼问题:
+
+
+
+
+
+开尔文-亥姆霍兹不稳定性问题:
+
+
+
+
+
+## 代码贡献
+
+gitee id: [xiaoruoye](https://gitee.com/xiaoruoye)
+
+邮箱: 1159053026@qq.com
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/cae_prediction.py b/MindFlow/applications/data_driven/cae_lstm/cae_eval.py
similarity index 42%
rename from MindFlow/applications/data_driven/cae_lstm/sod/cae_prediction.py
rename to MindFlow/applications/data_driven/cae_lstm/cae_eval.py
index 4c63b3b4e5fcf24d037644d82a1c8c7d91410663..30a671fe0538ec6173a7332fde9af921e5d909d3 100644
--- a/MindFlow/applications/data_driven/cae_lstm/sod/cae_prediction.py
+++ b/MindFlow/applications/data_driven/cae_lstm/cae_eval.py
@@ -13,31 +13,46 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
-"""prediction process"""
+"""prediction process of CaeNet"""
import os
import argparse
-import numpy as np
-from mindspore import load_checkpoint, load_param_into_net, set_seed, Tensor
+import numpy as np
+import mindspore as ms
+import mindspore.common.dtype as mstype
+from mindspore import load_checkpoint, load_param_into_net, set_seed, Tensor, ops
from mindflow.utils import load_yaml_config
-from src import CaeNet, create_cae_dataset, plot_cae_prediction
+from src import create_cae_dataset, CaeNet1D, CaeNet2D, plot_cae_prediction
np.random.seed(0)
set_seed(0)
-def cae_prediction():
- """Process of prediction with cae net"""
+def cae_eval(config_file_path, case):
+ """Process of prediction with CaeNet"""
# prepare params
- config = load_yaml_config(args.config_file_path)
- data_params = config["cae_data"]
- model_params = config["cae_model"]
- prediction_params = config["prediction"]
+ config = load_yaml_config(config_file_path)
+ if case in ('sod', 'shu_osher'):
+ data_params = config["1D_cae_data"]
+ model_params = config["1D_cae_model"]
+ prediction_params = config["1D_prediction"]
+ else:
+ data_params = config["2D_cae_data"]
+ model_params = config["2D_cae_model"]
+ prediction_params = config["2D_prediction"]
# prepare network
- cae = CaeNet(model_params["data_dimension"], model_params["conv_kernel_size"], model_params["maxpool_kernel_size"],
- model_params["maxpool_stride"], model_params["encoder_channels"], model_params["decoder_channels"])
+ if case in ('sod', 'shu_osher'):
+ cae = CaeNet1D(model_params["data_dimension"], model_params["conv_kernel_size"],
+ model_params["maxpool_kernel_size"], model_params["maxpool_stride"],
+ model_params["encoder_channels"], model_params["decoder_channels"])
+ else:
+ cae = CaeNet2D(model_params["data_dimension"], model_params["conv_kernel_size"],
+ model_params["maxpool_kernel_size"], model_params["maxpool_stride"],
+ model_params["encoder_channels"], model_params["decoder_channels"],
+ model_params["channels_dense"])
+
cae_param_dict = load_checkpoint(prediction_params["cae_ckpt_path"])
load_param_into_net(cae, cae_param_dict)
@@ -45,17 +60,33 @@ def cae_prediction():
_, true_data = create_cae_dataset(data_params["data_path"], data_params["batch_size"])
data_set = np.expand_dims(true_data, 1).astype(np.float32)
- print(f"=================Start cae prediction=====================")
- encoded = cae.encoder(Tensor(data_set))
- cae_predict = np.squeeze(cae(Tensor(data_set)).asnumpy())
- print(f"===================End cae prediction====================")
+ if case in ('sod', 'shu_osher'):
+ print(f"=================Start CaeNet1D prediction=====================")
+ encoded = cae.encoder(Tensor(data_set))
+ cae_predict = np.squeeze(cae(Tensor(data_set)).asnumpy())
+ print(f"===================End CaeNet1D prediction====================")
+ else:
+ print(f"=================Start CaeNet2D prediction=====================")
+ encoded = ops.zeros((data_params["time_size"], model_params["latent_size"]), mstype.float32)
+ cae_predict = np.zeros(true_data.shape)
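+        # encode and reconstruct the snapshots in time chunks (given by "encoder_time_spilt" in config.yaml) to reduce device memory usage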
+ for i in range(prediction_params["encoder_data_split"]):
+ time_predict_start, time_predict_end = \
+ prediction_params["encoder_time_spilt"][i], prediction_params["encoder_time_spilt"][i + 1]
+ encoded[time_predict_start: time_predict_end] = \
+ cae.encoder(ms.Tensor(data_set[time_predict_start: time_predict_end]))
+ cae_predict[time_predict_start: time_predict_end] = \
+ np.squeeze(cae(ms.Tensor(data_set[time_predict_start: time_predict_end])).asnumpy())
+ print(f"===================End CaeNet2D prediction====================")
+
plot_cae_prediction(encoded, cae_predict, true_data,
prediction_params["prediction_result_dir"], data_params["time_size"])
return encoded
if __name__ == "__main__":
- parser = argparse.ArgumentParser(description='cae prediction')
+ parser = argparse.ArgumentParser(description='CaeNet eval')
+ parser.add_argument("--case", type=str, default="sod", choices=["sod", "shu_osher", "riemann", "kh"],
+ help="Which case to run, support 'sod', 'shu_osher', 'riemann', 'kh'")
parser.add_argument("--mode", type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"],
help="Context mode, support 'GRAPH', 'PYNATIVE'")
parser.add_argument("--device_target", type=str, default="GPU", choices=["GPU", "CPU", "Ascend"],
@@ -65,4 +96,4 @@ if __name__ == "__main__":
args = parser.parse_args()
print(f"pid:{os.getpid()}")
- cae_prediction()
+ cae_eval(args.config_file_path, args.case)
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/cae_lstm_sod.ipynb b/MindFlow/applications/data_driven/cae_lstm/cae_lstm.ipynb
similarity index 61%
rename from MindFlow/applications/data_driven/cae_lstm/sod/cae_lstm_sod.ipynb
rename to MindFlow/applications/data_driven/cae_lstm/cae_lstm.ipynb
index b5eb2cff98e688a21afa6b695f3c2ba1ecb06bb9..50040049cfba440c8482c6bcd3a0606b49f053d3 100644
--- a/MindFlow/applications/data_driven/cae_lstm/sod/cae_lstm_sod.ipynb
+++ b/MindFlow/applications/data_driven/cae_lstm/cae_lstm.ipynb
@@ -6,7 +6,7 @@
"metadata": {},
"source": [
"\n",
- "# CAE-LSTM Reduced Order Model - Sod Shock Tube\n",
+ "# CAE-LSTM Reduced Order Model\n",
"\n",
"## Introduction\n",
"\n",
@@ -22,11 +22,11 @@
"\n",
"The CAE-LSTM reduced order model uses a CAE network to reduce the dimensionality of the flow field, extract the characteristics of the flow data, compress it into the hidden space of the encoder, and then use the LSTM network to perform coefficient time evolution on the free variables in the hidden space to obtain the free variables at other times of flow. Then, the decoder of the CAE network decodes the evolved free variables and reconstructs the flow field flow data at the corresponding time. The construction of the CAE-LSTM flow reduction model relies on the data reduction of the CAE network and the coefficient time evolution of the LSTM network. Compared with existing methods such as POD/DMD, using CAE networks for nonlinear dimensionality reduction of flow field data and LSTM networks for equation free evolution of free variables can achieve higher compression ratios and improve the efficiency of flow field prediction while ensuring the accuracy of the flow field reduction model.\n",
"\n",
- "+ Input:Input the flow field for a period of time\n",
- "+ Compression:Extract high-dimensional spatiotemporal flow characteristics by dimensionality reduction of the flow field using the encoder of CAE\n",
- "+ Evolution:Learning the evolution of spatiotemporal characteristics of low dimensional spatial flow fields through LSTM and predicting the next moment\n",
- "+ Reconstruction:Restore the predicted low-dimensional features of the flow field to high-dimensional space through the decoder of CAE\n",
- "+ Output:Output the predicted results of the transient flow field at the next moment\n",
+ "+ Input:Input the flow field for a period of time.\n",
+ "+ Compression:Extract high-dimensional spatiotemporal flow characteristics by dimensionality reduction of the flow field using the encoder of CAE.\n",
+ "+ Evolution:Learning the evolution of spatiotemporal characteristics of low dimensional spatial flow fields through LSTM and predicting the next moment.\n",
+ "+ Reconstruction:Restore the predicted low-dimensional features of the flow field to high-dimensional space through the decoder of CAE.\n",
+ "+ Output:Output the predicted results of the transient flow field at the next moment.\n",
"\n",
"The first step is to train the CAE network. After the training is completed, the CAE encoder is used to obtain the low dimensional features of the flow field. This low dimensional feature is used as the dataset of the LSTM network for LSTM network training."
]
@@ -36,61 +36,44 @@
"id": "901d5c41",
"metadata": {},
"source": [
- ""
+ ""
]
},
{
"cell_type": "markdown",
- "id": "7f317de2",
+ "id": "4e7406dd",
"metadata": {},
"source": [
- "## CAE dataset\n",
+ "## Training environment\n",
"\n",
- "One-dimensional sod shock tube\n",
+ "Import the required function library for training, where `src` includes dataset creation functions, network models and training loss visualization functions.\n",
"\n",
- "+ The Sod shock tube problem is an initial discontinuous evolution problem of one-dimensional inviscid compressible flow, and the data is a numerical simulation flow field. The coordinate range of the sod shock tube is \\[0, 1\\], and there is a thin film at x=0.5 in the middle. At the initial moment, remove the thin film in the middle of the shock tube and study the changes in gas density in the shock tube. The calculation time t ranges from \\[0, 0.2\\] and is divided into an average of 531 time steps. A total of 531 flow field snapshots, each with a matrix size of 256"
+ "You can choose different cases to run, i.e. `sod`, `shu_osher`, `riemann` or `kh`, among which `sod` and `shu_osher` are one-dimension cases, and `riemann` and `kh` are two-dimension cases. You can change the case name in the `case` of `parser.add_argument` to run the corresponding case. And if you use the command line to run network training, you can also write the case name after `--case` to run the corresponding case. Default `sod`.\n",
+ "\n",
+ "The static GRAPH of Mindspore framework is adopted for training. Training can be done on GPU (default) or Ascend (single card)."
]
},
{
"cell_type": "code",
"execution_count": 1,
- "id": "94159bd3",
+ "id": "17230db7",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import time\n",
"import argparse\n",
+ "\n",
"import numpy as np\n",
"\n",
"from mindspore import nn, ops, context, save_checkpoint, set_seed, jit, data_sink\n",
- "from mindflow.utils import load_yaml_config"
+ "from mindflow.utils import load_yaml_config\n",
+ "from src import create_cae_dataset, CaeNet1D, CaeNet2D, plot_train_loss"
]
},
{
"cell_type": "code",
"execution_count": 2,
- "id": "f5c6d767",
- "metadata": {},
- "outputs": [],
- "source": [
- "from src import create_cae_dataset, CaeNet, plot_train_loss"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "4e7406dd",
- "metadata": {},
- "source": [
- "## Training environment\n",
- "\n",
- "+ The static GRAPH of Mindspore framework is adopted for training\n",
- "+ Training can be done on GPU (default) or Ascend (single card)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
"id": "7e3ba84a",
"metadata": {},
"outputs": [],
@@ -101,12 +84,14 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": 3,
"id": "aa53aed1",
"metadata": {},
"outputs": [],
"source": [
- "parser = argparse.ArgumentParser(description='cae net for sod')\n",
+ "parser = argparse.ArgumentParser(description='CaeNet')\n",
+ "parser.add_argument(\"--case\", type=str, default=\"sod\", choices=[\"sod\", \"shu_osher\", \"riemann\", \"kh\"],\n",
+ " help=\"Which case to run, support 'sod', 'shu_osher', 'riemann', 'kh'\")\n",
"parser.add_argument(\"--mode\", type=str, default=\"GRAPH\", choices=[\"GRAPH\", \"PYNATIVE\"],\n",
" help=\"Context mode, support 'GRAPH', 'PYNATIVE'\")\n",
"parser.add_argument(\"--save_graphs\", type=bool, default=False, choices=[True, False],\n",
@@ -118,11 +103,13 @@
"parser.add_argument(\"--config_file_path\", type=str, default=\"./config.yaml\")\n",
"args = parser.parse_args()\n",
"\n",
- "context.set_context(mode=context.GRAPH_MODE if args.mode.upper().startswith(\"GRAPH\") else context.PYNATIVE_MODE,\n",
+ "context.set_context(case=args.case,\n",
+ " mode=context.GRAPH_MODE if args.mode.upper().startswith(\"GRAPH\") else context.PYNATIVE_MODE,\n",
" save_graphs=args.save_graphs,\n",
" save_graphs_path=args.save_graphs_path,\n",
" device_target=args.device_target,\n",
    "                    device_id=args.device_id)\n",
"use_ascend = context.get_context(attr_key='device_target') == \"Ascend\""
]
},
@@ -131,20 +118,27 @@
"id": "bbd5ca2c",
"metadata": {},
"source": [
- "## CAE training hyperparameter"
+ "## CAE training parameter settings\n",
+ "\n",
+ "Import corresponding parameter configurations for the dataset, CAE model, and optimizer from the config.yaml file according to the case chosen."
]
},
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 4,
"id": "37e0f61b",
"metadata": {},
"outputs": [],
"source": [
"config = load_yaml_config(args.config_file_path)\n",
- "data_params = config[\"cae_data\"]\n",
- "model_params = config[\"cae_model\"]\n",
- "optimizer_params = config[\"cae_optimizer\"]"
+ "if args.case == 'sod' or args.case == 'shu_osher':\n",
+ " data_params = config[\"1D_cae_data\"]\n",
+ " model_params = config[\"1D_cae_model\"]\n",
+ " optimizer_params = config[\"1D_cae_optimizer\"]\n",
+ "else:\n",
+ " data_params = config[\"2D_cae_data\"]\n",
+ " model_params = config[\"2D_cae_model\"]\n",
+ " optimizer_params = config[\"2D_cae_optimizer\"]"
]
},
{
@@ -152,12 +146,12 @@
"id": "8e53d5ec",
"metadata": {},
"source": [
- "## Path for saving training process files"
+ "The default path for saving loss files during training is optimizer_params [\"summary_dir\"], the weight parameters are saved in the ckpt folder."
]
},
{
"cell_type": "code",
- "execution_count": 6,
+ "execution_count": 5,
"id": "7e34bd79",
"metadata": {},
"outputs": [],
@@ -175,35 +169,28 @@
"id": "505908fc",
"metadata": {},
"source": [
- "## Construct CAE neural network"
+ "## Construct CAE neural network\n",
+ "\n",
+ "The CAE network consists of multiple layers of convolution and maximum pooling to form an encoder, and multiple layers of convolution and upsampling to form a decoder. Use MSELoss loss function and Adam optimizer."
]
},
{
"cell_type": "code",
- "execution_count": 7,
+ "execution_count": 6,
"id": "dbe1356d",
"metadata": {},
"outputs": [],
"source": [
- "cae = CaeNet(model_params[\"data_dimension\"], model_params[\"conv_kernel_size\"], model_params[\"maxpool_kernel_size\"],\n",
- " model_params[\"maxpool_stride\"], model_params[\"encoder_channels\"], model_params[\"decoder_channels\"])"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "a8e86579",
- "metadata": {},
- "source": [
- "## CAE optimizer"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "id": "86c63294",
- "metadata": {},
- "outputs": [],
- "source": [
+ "if args.case == 'sod' or args.case == 'shu_osher':\n",
+ " cae = CaeNet1D(model_params[\"data_dimension\"], model_params[\"conv_kernel_size\"],\n",
+ " model_params[\"maxpool_kernel_size\"], model_params[\"maxpool_stride\"],\n",
+ " model_params[\"encoder_channels\"], model_params[\"decoder_channels\"])\n",
+ "else:\n",
+ " cae = CaeNet2D(model_params[\"data_dimension\"], model_params[\"conv_kernel_size\"],\n",
+ " model_params[\"maxpool_kernel_size\"], model_params[\"maxpool_stride\"],\n",
+ " model_params[\"encoder_channels\"], model_params[\"decoder_channels\"],\n",
+ " model_params[\"channels_dense\"])\n",
+ "\n",
"loss_fn = nn.MSELoss()\n",
"cae_opt = nn.Adam(cae.trainable_params(), optimizer_params[\"lr\"], weight_decay=optimizer_params[\"weight_decay\"])\n",
"\n",
@@ -217,72 +204,25 @@
},
{
"cell_type": "markdown",
- "id": "89d32ff9",
+ "id": "d1ea2da0",
"metadata": {},
"source": [
- "## CAE training framework"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "id": "505f3e5b",
- "metadata": {},
- "outputs": [],
- "source": [
- "def forward_fn(data, label):\n",
- " logits = cae(data)\n",
- " loss = loss_fn(logits, label)\n",
- " if use_ascend:\n",
- " loss = loss_scaler.scale(loss)\n",
- " return loss\n",
+ "## CAE dataset\n",
"\n",
- "grad_fn = ops.value_and_grad(forward_fn, None, cae_opt.parameters, has_aux=False)\n",
+ "Dataset download address: [data_driven/cae-lstm/dataset](https://download.mindspore.cn/mindscience/mindflow/dataset/applications/data_driven/cae-lstm)\n",
"\n",
- "@jit\n",
- "def train_step(data, label):\n",
- " loss, grads = grad_fn(data, label)\n",
- " if use_ascend:\n",
- " loss = loss_scaler.unscale(loss)\n",
- " if all_finite(grads):\n",
- " grads = loss_scaler.unscale(grads)\n",
- " loss = ops.depend(loss, cae_opt(grads))\n",
- " return loss"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "faacf783",
- "metadata": {},
- "source": [
- "## CAE dataset loading"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "id": "25786be1",
- "metadata": {},
- "outputs": [],
- "source": [
- "cae_dataset, _ = create_cae_dataset(data_params[\"data_path\"], data_params[\"batch_size\"])"
- ]
- },
- {
- "cell_type": "markdown",
- "id": "9da7331a",
- "metadata": {},
- "source": [
- "## Data sink operation"
+ "After importing the dataset, perform data sinking settings."
]
},
{
"cell_type": "code",
- "execution_count": 11,
+ "execution_count": 7,
"id": "7eb8487b",
"metadata": {},
"outputs": [],
"source": [
+ "cae_dataset, _ = create_cae_dataset(data_params[\"data_path\"], data_params[\"batch_size\"])\n",
+ "\n",
"sink_process = data_sink(train_step, cae_dataset, sink_size=1)\n",
"train_data_size = cae_dataset.get_dataset_size()"
]
@@ -292,12 +232,14 @@
"id": "771dfcdf",
"metadata": {},
"source": [
- "# CAE training"
+ "## CAE training\n",
+ "\n",
+ "Build forward_fn and train_step, start training the CAE network and visualize the training loss."
]
},
{
"cell_type": "code",
- "execution_count": 12,
+ "execution_count": 8,
"id": "6f16d65f",
"metadata": {},
"outputs": [
@@ -306,7 +248,7 @@
"output_type": "stream",
"text": [
"pid:23104\n",
- "====================Start cae train=======================\n",
+ "====================Start CaeNet train=======================\n",
"epoch: 1 train loss: 0.00859989 epoch time: 3.23s\n",
"epoch: 2 train loss: 0.00563688 epoch time: 0.52s\n",
"epoch: 3 train loss: 0.00485115 epoch time: 0.53s\n",
@@ -318,12 +260,31 @@
"epoch: 4398 train loss: 1.14417275e-06 epoch time: 0.54s\n",
"epoch: 4399 train loss: 4.97764995e-06 epoch time:0.52s\n",
"epoch: 4400 train loss: 2.48092419e-06 epoch time: 0.55s\n",
- "====================End cae train=======================\n"
+ "====================End CaeNet train=======================\n"
]
}
],
"source": [
- "print(f\"====================Start cae train=======================\")\n",
+ "def forward_fn(data, label):\n",
+ " logits = cae(data)\n",
+ " loss = loss_fn(logits, label)\n",
+ " if use_ascend:\n",
+ " loss = loss_scaler.scale(loss)\n",
+ " return loss\n",
+ "\n",
+ "grad_fn = ops.value_and_grad(forward_fn, None, cae_opt.parameters, has_aux=False)\n",
+ "\n",
+ "@jit\n",
+ "def train_step(data, label):\n",
+ " loss, grads = grad_fn(data, label)\n",
+ " if use_ascend:\n",
+ " loss = loss_scaler.unscale(loss)\n",
+ " if all_finite(grads):\n",
+ " grads = loss_scaler.unscale(grads)\n",
+ " loss = ops.depend(loss, cae_opt(grads))\n",
+ " return loss\n",
+ "\n",
+ "print(f\"====================Start CaeNet train=======================\")\n",
"train_loss = []\n",
"for epoch in range(1, optimizer_params[\"epochs\"] + 1):\n",
" local_time_beg = time.time()\n",
@@ -336,7 +297,7 @@
"\n",
" if epoch % optimizer_params[\"save_ckpt_interval\"] == 0:\n",
" save_checkpoint(cae, f\"{ckpt_dir}/cae_{epoch}.ckpt\")\n",
- "print(f\"=====================End cae train========================\")\n",
+ "print(f\"=====================End CaeNet train========================\")\n",
"plot_train_loss(train_loss, summary_dir, optimizer_params[\"epochs\"], \"cae\")"
]
},
@@ -347,8 +308,17 @@
"source": [
"## CAE flow field reconstruction results\n",
"\n",
- "+ After training the CAE network, run cae_prediction.py to view the training results of CAE to determine whether to continue training the LSTM network\n",
- "+ The following figures show the real flow field, CAE flow field reconstruction results, and average relative error, respectively"
+ "After training the CAE network, run `cae_eval.py` to view the training results of CAE to determine whether to continue training the LSTM network\n",
+ "\n",
+ "The following figures show the real flow field, CAE flow field reconstruction results, and the error curves between them in the four cases. The first two flow field results show the variation of density at different x positions in the flow field over time, while the third error curve shows the average relative error of the CAE reconstructed flow field and the real flow field label over time. The errors meeting the accuracy requirements for flow field reconstruction."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "816cf7c6",
+ "metadata": {},
+ "source": [
+ "Sod shock tube:"
]
},
{
@@ -357,52 +327,104 @@
"metadata": {},
"source": [
"
\n",
- " \n",
- " \n",
- " \n",
+ " \n",
+ " \n",
""
]
},
{
"cell_type": "markdown",
- "id": "52e0a7e8",
+ "id": "c60101d7",
"metadata": {},
"source": [
- "## LSTM neural work\n",
- "\n",
- "The construction and training of LSTM network framework are similar to those of CAE network."
+ "Shu_Osher problem:"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "fa08460b",
+ "metadata": {},
+ "source": [
+ "
\n",
+ " \n",
+ " \n",
""
]
}
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/cae_lstm_prediction.py b/MindFlow/applications/data_driven/cae_lstm/cae_lstm_eval.py
similarity index 55%
rename from MindFlow/applications/data_driven/cae_lstm/sod/cae_lstm_prediction.py
rename to MindFlow/applications/data_driven/cae_lstm/cae_lstm_eval.py
index 4e66a58c82681f9e52b0c9c3a8df30a885c7629b..262123e8017fd8374f9cf328ed90f7b63f3226d3 100644
--- a/MindFlow/applications/data_driven/cae_lstm/sod/cae_lstm_prediction.py
+++ b/MindFlow/applications/data_driven/cae_lstm/cae_lstm_eval.py
@@ -16,36 +16,51 @@
"""prediction process"""
import os
import argparse
+
import numpy as np
+import mindspore as ms
from mindspore import load_checkpoint, load_param_into_net, set_seed, Tensor
-
from mindflow.utils import load_yaml_config
-from src import CaeNet, Lstm, create_cae_dataset, create_lstm_dataset, plot_cae_lstm_prediction
-from cae_prediction import cae_prediction
+from src import create_cae_dataset, create_lstm_dataset, CaeNet1D, CaeNet2D, Lstm, plot_cae_lstm_prediction
+from cae_eval import cae_eval
np.random.seed(0)
set_seed(0)
-def cae_lstm_prediction(encoded):
+def cae_lstm_eval(encoded):
"""Process of prediction with cae-lstm net"""
# prepare params
config = load_yaml_config(args.config_file_path)
- cae_data_params = config["cae_data"]
- lstm_data_params = config["lstm_data"]
- cae_model_params = config["cae_model"]
- lstm_model_params = config["lstm_model"]
- prediction_params = config["prediction"]
+ if args.case == 'sod' or args.case == 'shu_osher':
+ cae_data_params = config["1D_cae_data"]
+ lstm_data_params = config["1D_lstm_data"]
+ cae_model_params = config["1D_cae_model"]
+ lstm_model_params = config["1D_lstm_model"]
+ prediction_params = config["1D_prediction"]
+ else:
+ cae_data_params = config["2D_cae_data"]
+ lstm_data_params = config["2D_lstm_data"]
+ cae_model_params = config["2D_cae_model"]
+ lstm_model_params = config["2D_lstm_model"]
+ prediction_params = config["2D_prediction"]
# prepare network
lstm = Lstm(lstm_model_params["latent_size"], lstm_model_params["hidden_size"], lstm_model_params["num_layers"])
lstm_param_dict = load_checkpoint(prediction_params["lstm_ckpt_path"])
load_param_into_net(lstm, lstm_param_dict)
- cae = CaeNet(cae_model_params["data_dimension"], cae_model_params["conv_kernel_size"],
- cae_model_params["maxpool_kernel_size"], cae_model_params["maxpool_stride"],
- cae_model_params["encoder_channels"], cae_model_params["decoder_channels"])
+ if args.case == 'sod' or args.case == 'shu_osher':
+ cae = CaeNet1D(cae_model_params["data_dimension"], cae_model_params["conv_kernel_size"],
+ cae_model_params["maxpool_kernel_size"], cae_model_params["maxpool_stride"],
+ cae_model_params["encoder_channels"], cae_model_params["decoder_channels"])
+ else:
+ cae = CaeNet2D(cae_model_params["data_dimension"], cae_model_params["conv_kernel_size"],
+ cae_model_params["maxpool_kernel_size"], cae_model_params["maxpool_stride"],
+ cae_model_params["encoder_channels"], cae_model_params["decoder_channels"],
+ cae_model_params["channels_dense"])
+
cae_param_dict = load_checkpoint(prediction_params["cae_ckpt_path"])
load_param_into_net(cae, cae_param_dict)
@@ -59,23 +74,35 @@ def cae_lstm_prediction(encoded):
output_seq_pred = np.zeros(shape=(lstm_data_params["time_size"] - lstm_data_params["time_window"],
lstm_data_params["latent_size"]))
- print(f"=================Start lstm prediction=====================")
+ print(f"=================Start Lstm prediction=====================")
input_seq_pred = input_seq[0].reshape((1, lstm_data_params["time_window"], lstm_data_params["latent_size"]))
input_seq_pred = input_seq_pred.astype(np.float32)
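+    # autoregressive rollout: each predicted latent vector is appended to the sliding input window for the next step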
for sample in range(0, lstm_data_params["time_size"] - lstm_data_params["time_window"]):
output_seq_pred[sample, :] = lstm(Tensor(input_seq_pred)).asnumpy()[0, 0, :]
input_seq_pred[0, : -1, :] = input_seq_pred[0, 1:, :]
input_seq_pred[0, -1, :] = output_seq_pred[sample, :]
- print(f"===================End lstm prediction====================")
+ print(f"===================End Lstm prediction====================")
lstm_latent = np.expand_dims(output_seq_pred, 1)
- lstm_latent = Tensor(lstm_latent.astype(np.float32))
- cae_lstm_predict = np.squeeze(cae.decoder(lstm_latent).asnumpy())
+ lstm_latent = ms.Tensor(lstm_latent.astype(np.float32))
+ if args.case == 'sod' or args.case == 'shu_osher':
+ cae_lstm_predict = np.squeeze(cae.decoder(lstm_latent).asnumpy())
+ else:
+ cae_lstm_predict_time = lstm_data_params["time_size"] - lstm_data_params["time_window"]
+ cae_lstm_predict = np.zeros((cae_lstm_predict_time, true_data.shape[1], true_data.shape[2]))
+ for i in range(prediction_params["decoder_data_split"]):
+ time_predict_start, time_predict_end = \
+ prediction_params["decoder_time_spilt"][i], prediction_params["decoder_time_spilt"][i + 1]
+ cae_lstm_predict[time_predict_start: time_predict_end] = \
+ np.squeeze(cae.decoder(lstm_latent[time_predict_start: time_predict_end]).asnumpy())
+
plot_cae_lstm_prediction(lstm_latent, cae_lstm_predict, true_data, prediction_params["prediction_result_dir"],
lstm_data_params["time_size"], lstm_data_params["time_window"])
if __name__ == "__main__":
- parser = argparse.ArgumentParser(description='cae-lstm prediction')
+ parser = argparse.ArgumentParser(description='cae-lstm eval')
+ parser.add_argument("--case", type=str, default="sod", choices=["sod", "shu_osher", "riemann", "kh"],
+ help="Which case to run, support 'sod', 'shu_osher', 'riemann', 'kh'")
parser.add_argument("--mode", type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"],
help="Context mode, support 'GRAPH', 'PYNATIVE'")
parser.add_argument("--device_target", type=str, default="GPU", choices=["GPU", "CPU", "Ascend"],
@@ -85,5 +112,5 @@ if __name__ == "__main__":
args = parser.parse_args()
print(f"pid:{os.getpid()}")
- cae_latent = cae_prediction()
- cae_lstm_prediction(cae_latent)
+ cae_latent = cae_eval(args.config_file_path, args.case)
+ cae_lstm_eval(cae_latent)
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/cae_train.py b/MindFlow/applications/data_driven/cae_lstm/cae_train.py
similarity index 71%
rename from MindFlow/applications/data_driven/cae_lstm/sod/cae_train.py
rename to MindFlow/applications/data_driven/cae_lstm/cae_train.py
index d2bc9e4ca58af3ab80da62830ad3843d9f6dea2b..cbe8631a22639dec348863641012be67707af120 100644
--- a/MindFlow/applications/data_driven/cae_lstm/sod/cae_train.py
+++ b/MindFlow/applications/data_driven/cae_lstm/cae_train.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""train"""
+"""CaeNet train"""
import os
import time
import argparse
@@ -20,21 +20,25 @@ import argparse
import numpy as np
from mindspore import nn, ops, context, save_checkpoint, set_seed, jit, data_sink
-
from mindflow.utils import load_yaml_config
-from src import create_cae_dataset, CaeNet, plot_train_loss
+from src import create_cae_dataset, CaeNet1D, CaeNet2D, plot_train_loss
np.random.seed(0)
set_seed(0)
def cae_train():
- """cae net train process"""
+ """CaeNet train process"""
# prepare params
config = load_yaml_config(args.config_file_path)
- data_params = config["cae_data"]
- model_params = config["cae_model"]
- optimizer_params = config["cae_optimizer"]
+ if args.case == 'sod' or args.case == 'shu_osher':
+ data_params = config["1D_cae_data"]
+ model_params = config["1D_cae_model"]
+ optimizer_params = config["1D_cae_optimizer"]
+ else:
+ data_params = config["2D_cae_data"]
+ model_params = config["2D_cae_model"]
+ optimizer_params = config["2D_cae_optimizer"]
# prepare summary file
summary_dir = optimizer_params["summary_dir"]
@@ -45,8 +49,16 @@ def cae_train():
os.mkdir(ckpt_dir)
# prepare model
- cae = CaeNet(model_params["data_dimension"], model_params["conv_kernel_size"], model_params["maxpool_kernel_size"],
- model_params["maxpool_stride"], model_params["encoder_channels"], model_params["decoder_channels"])
+ if args.case == 'sod' or args.case == 'shu_osher':
+ cae = CaeNet1D(model_params["data_dimension"], model_params["conv_kernel_size"],
+ model_params["maxpool_kernel_size"], model_params["maxpool_stride"],
+ model_params["encoder_channels"], model_params["decoder_channels"])
+ else:
+ cae = CaeNet2D(model_params["data_dimension"], model_params["conv_kernel_size"],
+ model_params["maxpool_kernel_size"], model_params["maxpool_stride"],
+ model_params["encoder_channels"], model_params["decoder_channels"],
+ model_params["channels_dense"])
+
loss_fn = nn.MSELoss()
cae_opt = nn.Adam(cae.trainable_params(), optimizer_params["lr"], weight_decay=optimizer_params["weight_decay"])
@@ -85,7 +97,7 @@ def cae_train():
sink_process = data_sink(train_step, cae_dataset, sink_size=1)
train_data_size = cae_dataset.get_dataset_size()
- print(f"====================Start cae train=======================")
+ print(f"====================Start CaeNet train=======================")
train_loss = []
for epoch in range(1, optimizer_params["epochs"] + 1):
local_time_beg = time.time()
@@ -98,12 +110,14 @@ def cae_train():
if epoch % optimizer_params["save_ckpt_interval"] == 0:
save_checkpoint(cae, f"{ckpt_dir}/cae_{epoch}.ckpt")
- print(f"=====================End cae train========================")
+ print(f"=====================End CaeNet train========================")
plot_train_loss(train_loss, summary_dir, optimizer_params["epochs"], "cae")
if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='cae net')
+ parser = argparse.ArgumentParser(description='CaeNet')
+ parser.add_argument("--case", type=str, default="sod", choices=["sod", "shu_osher", "riemann", "kh"],
+ help="Which case to run, support 'sod', 'shu_osher', 'riemann', 'kh'")
parser.add_argument("--mode", type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"],
help="Context mode, support 'GRAPH', 'PYNATIVE'")
parser.add_argument("--save_graphs", type=bool, default=False, choices=[True, False],
@@ -115,11 +129,13 @@ if __name__ == '__main__':
parser.add_argument("--config_file_path", type=str, default="./config.yaml")
args = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE if args.mode.upper().startswith("GRAPH") else context.PYNATIVE_MODE,
save_graphs=args.save_graphs,
save_graphs_path=args.save_graphs_path,
device_target=args.device_target,
device_id=args.device_id)
use_ascend = context.get_context(attr_key='device_target') == "Ascend"
print(f"pid: {os.getpid()}")
diff --git a/MindFlow/applications/data_driven/cae_lstm/config.yaml b/MindFlow/applications/data_driven/cae_lstm/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f1e5761832df485c7562d7384c0751dd953e3e47
--- /dev/null
+++ b/MindFlow/applications/data_driven/cae_lstm/config.yaml
@@ -0,0 +1,84 @@
+# 1D case
+1D_cae_data:
+ data_path: "./dataset/sod.npy" # ./dataset/sod.npy or ./dataset/shu_osher.npy
+ batch_size: 8 # 8 for sod, 16 for shu_osher
+ time_size: 531 # The numbers of the snapshots, 531 for sod, 2093 for shu_osher
+1D_cae_optimizer:
+ lr: 0.001
+ weight_decay: 0.000001
+ epochs: 4400
+ save_ckpt_interval: 400
+ summary_dir: "./summary" # The directory where the training process files are saved
+1D_cae_model:
+ data_dimension: [256, 128, 64, 32, 16, 8] # [256, 128, 64, 32, 16, 8] for sod, [512, 256, 128, 64, 32, 16] for shu_osher
+ conv_kernel_size: 3
+ maxpool_kernel_size: 2
+ maxpool_stride: 2
+ encoder_channels: [1, 40, 30, 20, 10, 5, 1]
+ decoder_channels: [1, 1, 5, 10, 20, 30, 40, 1]
+1D_lstm_data:
+ batch_size: 4 # 4 for sod, 16 for shu_osher
+ time_size: 531 # 531 for sod, 2093 for shu_osher
+ latent_size: 4 # 4 for sod, 8 for shu_osher
+ time_window: 70 # 70 for sod, 300 for shu_osher
+ gaussian_filter_sigma: 3
+1D_lstm_optimizer:
+ lr: 0.001
+ weight_decay: 0.000001
+ epochs: 4400
+ save_ckpt_interval: 400
+ summary_dir: "./summary"
+1D_lstm_model:
+ latent_size: 4 # The input size of LSTM, 4 for sod, 8 for shu_osher
+ hidden_size: 200
+ num_layers: 2 # 2 for sod, 3 for shu_osher
+1D_prediction:
+ cae_ckpt_path: "./summary/ckpt/cae_4400.ckpt"
+ lstm_ckpt_path: "./summary/ckpt/lstm_4000.ckpt"
+ prediction_result_dir: "./prediction_result"
+
+
+# 2D case
+2D_cae_data:
+ data_path: "./dataset/riemann.npy" # ./dataset/riemann.npy or ./dataset/kh.npy
+ batch_size: 16 # 16 for riemann, 32 for kh
+ time_size: 1250 # The numbers of the snapshots, 1250 for riemann, 1786 for kh
+2D_cae_optimizer:
+ lr: 0.001
+ weight_decay: 0.000001
+ epochs: 4400
+ save_ckpt_interval: 400
+ summary_dir: "./summary" # The directory where the training process files are saved
+2D_cae_model:
+ data_dimension: [128, 64, 32, 16, 8, 4] # [128, 64, 32, 16, 8, 4] for riemann, [256, 128, 64, 32, 16, 8] for kh
+ conv_kernel_size: 3
+ maxpool_kernel_size: 2
+ maxpool_stride: 2
+ encoder_channels: [ 1, 30, 25, 20, 15, 10, 5 ] # [ 1, 30, 25, 20, 15, 10, 5 ] for riemann, [1, 40, 30, 20, 10, 5, 1] for kh
+ decoder_channels: [ 5, 5, 10, 15, 20, 25, 30, 1 ] # [ 5, 5, 10, 15, 20, 25, 30, 1 ] for riemann, [1, 1, 5, 10, 20, 30, 40, 1] for kh
+ channels_dense: [ 20, 30, 25, 20 ] # [ 20, 30, 25, 20 ] for riemann, [16, 30, 25, 20] for kh
+ latent_size: 20
+2D_lstm_data:
+ batch_size: 32
+ time_size: 1250 # 1250 for riemann, 1786 for kh
+ latent_size: 20
+ time_window: 150 # 150 for riemann, 200 for kh
+ gaussian_filter_sigma: 3
+2D_lstm_optimizer:
+ lr: 0.001
+ weight_decay: 0.000001
+ epochs: 4400
+ save_ckpt_interval: 400
+ summary_dir: "./summary"
+2D_lstm_model:
+ latent_size: 20 # The input size of LSTM
+ hidden_size: 200
+ num_layers: 3
+2D_prediction:
+ encoder_data_split: 1 # Split the dataset to reduce memory, 1 for riemann, 4 for kh
+ encoder_time_spilt: [ 0, 1250 ] # [ 0, 1250 ] for riemann, [0, 500, 1000, 1500, 1786] for kh
+ decoder_data_split: 1 # 1 for riemann, 4 for kh
+ decoder_time_spilt: [ 0, 1250 ] # [ 0, 1250 ] for riemann, [0, 500, 1000, 1500, 1786] for kh
+ cae_ckpt_path: "./summary/ckpt/cae_4400.ckpt"
+ lstm_ckpt_path: "./summary/ckpt/lstm_4000.ckpt"
+ prediction_result_dir: "./prediction_result"
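+
+# Note: the 1D_* sections are used when --case is 'sod' or 'shu_osher', and the 2D_* sections
+# when --case is 'riemann' or 'kh'. The per-case values given in the comments above (data_path,
+# batch_size, time_size, etc.) are not switched automatically by --case; edit them here before
+# training a different case.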
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/cae_lstm.png b/MindFlow/applications/data_driven/cae_lstm/images/cae_lstm.png
new file mode 100644
index 0000000000000000000000000000000000000000..c46b96c2cecc7585959ff95d3788b4556d420565
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/cae_lstm.png differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/images/CAE-LSTM.png b/MindFlow/applications/data_driven/cae_lstm/images/cae_lstm_CN.png
similarity index 100%
rename from MindFlow/applications/data_driven/cae_lstm/sod/images/CAE-LSTM.png
rename to MindFlow/applications/data_driven/cae_lstm/images/cae_lstm_CN.png
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/kh_cae_error.png b/MindFlow/applications/data_driven/cae_lstm/images/kh_cae_error.png
new file mode 100644
index 0000000000000000000000000000000000000000..412e4a90c862a1eeaa2d99a322556ac6398f4e91
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/kh_cae_error.png differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/kh_cae_lstm_error.png b/MindFlow/applications/data_driven/cae_lstm/images/kh_cae_lstm_error.png
new file mode 100644
index 0000000000000000000000000000000000000000..94429998eac69eca11521e18db1503c57708d056
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/kh_cae_lstm_error.png differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/kh_cae_lstm_predict.gif b/MindFlow/applications/data_driven/cae_lstm/images/kh_cae_lstm_predict.gif
new file mode 100644
index 0000000000000000000000000000000000000000..37f91a3e02e912ad2007c4a7220dc02a65c3fd64
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/kh_cae_lstm_predict.gif differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/kh_cae_reconstruction.gif b/MindFlow/applications/data_driven/cae_lstm/images/kh_cae_reconstruction.gif
new file mode 100644
index 0000000000000000000000000000000000000000..140fec2cd433224d11ce3b456bcae252b3bdd729
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/kh_cae_reconstruction.gif differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/riemann_cae_error.png b/MindFlow/applications/data_driven/cae_lstm/images/riemann_cae_error.png
new file mode 100644
index 0000000000000000000000000000000000000000..3aae4d5bccd0739f2d3e4323ae402b18950ffdd9
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/riemann_cae_error.png differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/riemann_cae_lstm_error.png b/MindFlow/applications/data_driven/cae_lstm/images/riemann_cae_lstm_error.png
new file mode 100644
index 0000000000000000000000000000000000000000..1a2b33dc64dd1224d4c11aad990548f67e2d0f39
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/riemann_cae_lstm_error.png differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/riemann_cae_lstm_predict.gif b/MindFlow/applications/data_driven/cae_lstm/images/riemann_cae_lstm_predict.gif
new file mode 100644
index 0000000000000000000000000000000000000000..74010dc8b8f5e804751c47cdd7f0fdbfee04005c
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/riemann_cae_lstm_predict.gif differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/riemann_cae_reconstruction.gif b/MindFlow/applications/data_driven/cae_lstm/images/riemann_cae_reconstruction.gif
new file mode 100644
index 0000000000000000000000000000000000000000..ef4acd746a814abec734947c99c1c4d876c2da08
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/riemann_cae_reconstruction.gif differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/shu_osher_cae_error.png b/MindFlow/applications/data_driven/cae_lstm/images/shu_osher_cae_error.png
new file mode 100644
index 0000000000000000000000000000000000000000..4831c02bc82ed84349751e5ecc8c39ba9ab4cb2b
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/shu_osher_cae_error.png differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/shu_osher_cae_lstm_error.png b/MindFlow/applications/data_driven/cae_lstm/images/shu_osher_cae_lstm_error.png
new file mode 100644
index 0000000000000000000000000000000000000000..30ead5e010e6d46fb0ad21d0830825e7debcb85e
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/shu_osher_cae_lstm_error.png differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/shu_osher_cae_lstm_predict.gif b/MindFlow/applications/data_driven/cae_lstm/images/shu_osher_cae_lstm_predict.gif
new file mode 100644
index 0000000000000000000000000000000000000000..ee772015f6c749bb9a59171c07936ab3621db332
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/shu_osher_cae_lstm_predict.gif differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/shu_osher_cae_reconstruction.gif b/MindFlow/applications/data_driven/cae_lstm/images/shu_osher_cae_reconstruction.gif
new file mode 100644
index 0000000000000000000000000000000000000000..a1aa2d0680f4fa7a37baace6c9f795de2b2740ad
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/shu_osher_cae_reconstruction.gif differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/images/cae_error.png b/MindFlow/applications/data_driven/cae_lstm/images/sod_cae_error.png
similarity index 100%
rename from MindFlow/applications/data_driven/cae_lstm/sod/images/cae_error.png
rename to MindFlow/applications/data_driven/cae_lstm/images/sod_cae_error.png
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/sod_cae_lstm_error.png b/MindFlow/applications/data_driven/cae_lstm/images/sod_cae_lstm_error.png
new file mode 100644
index 0000000000000000000000000000000000000000..5d563ba06134417c86b568ec7149438815cf0194
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/sod_cae_lstm_error.png differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/sod_cae_lstm_predict.gif b/MindFlow/applications/data_driven/cae_lstm/images/sod_cae_lstm_predict.gif
new file mode 100644
index 0000000000000000000000000000000000000000..cc7ef60deb71d061cfc768daacbba2debbe0dddf
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/sod_cae_lstm_predict.gif differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/images/sod_cae_reconstruction.gif b/MindFlow/applications/data_driven/cae_lstm/images/sod_cae_reconstruction.gif
new file mode 100644
index 0000000000000000000000000000000000000000..382433c5bb661468822c500857f3379c2c30b30d
Binary files /dev/null and b/MindFlow/applications/data_driven/cae_lstm/images/sod_cae_reconstruction.gif differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/lstm_train.py b/MindFlow/applications/data_driven/cae_lstm/lstm_train.py
similarity index 80%
rename from MindFlow/applications/data_driven/cae_lstm/sod/lstm_train.py
rename to MindFlow/applications/data_driven/cae_lstm/lstm_train.py
index 0bc7a758f68cedd7292bb4f4aa0d56027371958e..a64a0f9c037df8f1c62d6ee6c9dc348932571842 100644
--- a/MindFlow/applications/data_driven/cae_lstm/sod/lstm_train.py
+++ b/MindFlow/applications/data_driven/cae_lstm/lstm_train.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""train"""
+"""Lstm train"""
import os
import time
import argparse
@@ -20,22 +20,26 @@ import argparse
import numpy as np
from mindspore import nn, ops, context, save_checkpoint, set_seed, jit, data_sink
-
from mindflow.utils import load_yaml_config
from src import create_lstm_dataset, Lstm, plot_train_loss
-from cae_prediction import cae_prediction
+from cae_eval import cae_eval
np.random.seed(0)
set_seed(0)
def lstm_train():
- """lstm net train process"""
+ """Lstm train process"""
# prepare params
config = load_yaml_config(args.config_file_path)
- data_params = config["lstm_data"]
- model_params = config["lstm_model"]
- optimizer_params = config["lstm_optimizer"]
+ if args.case == 'sod' or args.case == 'shu_osher':
+ data_params = config["1D_lstm_data"]
+ model_params = config["1D_lstm_model"]
+ optimizer_params = config["1D_lstm_optimizer"]
+ else:
+        data_params = config["2D_lstm_data"]
+        model_params = config["2D_lstm_model"]
+        optimizer_params = config["2D_lstm_optimizer"]
# prepare summary file
summary_dir = optimizer_params["summary_dir"]
@@ -75,7 +79,7 @@ def lstm_train():
return loss
# prepare dataset
- latent_true = cae_prediction()
+ latent_true = cae_eval(args.config_file_path, args.case)
lstm_dataset, _ = create_lstm_dataset(latent_true, data_params["batch_size"], data_params["time_size"],
data_params["latent_size"], data_params["time_window"],
data_params["gaussian_filter_sigma"])
@@ -84,7 +88,7 @@ def lstm_train():
sink_process = data_sink(train_step, lstm_dataset, sink_size=1)
train_data_size = lstm_dataset.get_dataset_size()
- print(f"====================Start lstm train=======================")
+ print(f"====================Start Lstm train=======================")
train_loss = []
for epoch in range(1, optimizer_params["epochs"] + 1):
local_time_beg = time.time()
@@ -97,12 +101,14 @@ def lstm_train():
if epoch % optimizer_params["save_ckpt_interval"] == 0:
save_checkpoint(lstm, f"{ckpt_dir}/lstm_{epoch}.ckpt")
- print(f"=====================End lstm train========================")
+ print(f"=====================End Lstm train========================")
plot_train_loss(train_loss, summary_dir, optimizer_params["epochs"], "lstm")
if __name__ == '__main__':
- parser = argparse.ArgumentParser(description='lstm net')
+ parser = argparse.ArgumentParser(description='Lstm')
+ parser.add_argument("--case", type=str, default="sod", choices=["sod", "shu_osher", "riemann", "kh"],
+ help="Which case to run, support 'sod', 'shu_osher', 'riemann', 'kh'")
parser.add_argument("--mode", type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"],
help="Context mode, support 'GRAPH', 'PYNATIVE'")
parser.add_argument("--save_graphs", type=bool, default=False, choices=[True, False],
@@ -114,11 +120,13 @@ if __name__ == '__main__':
parser.add_argument("--config_file_path", type=str, default="./config.yaml")
args = parser.parse_args()
context.set_context(mode=context.GRAPH_MODE if args.mode.upper().startswith("GRAPH") else context.PYNATIVE_MODE,
save_graphs=args.save_graphs,
save_graphs_path=args.save_graphs_path,
device_target=args.device_target,
device_id=args.device_id)
use_ascend = context.get_context(attr_key='device_target') == "Ascend"
print(f"pid: {os.getpid()}")
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/README.md b/MindFlow/applications/data_driven/cae_lstm/sod/README.md
deleted file mode 100644
index 41bb999d220010791924aed12703af96d089b677..0000000000000000000000000000000000000000
--- a/MindFlow/applications/data_driven/cae_lstm/sod/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
----
-
-# Introduction
-
-In order to effectively reduce the design cost and cycle time of using CFD methods, the reduced-order model (ROM) has gained wide attention in recent years. For complex compressible flows, using linear methods such as Proper Orthogonal Decomposition (POD) for flow field dimensionality reduction requires a large number of modes to ensure the reconstruction accuracy. It has been shown that the modes number can be effectively reduced by using nonlinear dimensionality reduction methods. Convolutional Autoencoder (CAE) is a kind of neural network composed of encoder and decoder, which can realize data dimensionality reduction and recon-struction, and can be regarded as a nonlinear extension of POD method. CAE is used for nonlinear dimension-ality reduction, and Long Short-Term Memory (LSTM) is used for time evolution. The CAE-LSTM can obtain high reconstruction and prediction accuracy on the premise of using less latents for unsteady compressible flows.
-
-# Framework of CAE-LSTM
-
-The basic framework of CAE-LSTM is mainly based on [paper](https://doi.org/10.13700/j.bh.1001-5965.2022.0085). It consists of CAE and LSTM, where the encoder in CAE reduces the dimensionality of the time series flow field to achieve feature extraction, LSTM learns low dimensional spatiotemporal features and makes predictions, and the decoder in CAE realizes flow field reconstruction.
-
-+ Input:Input the flow field for a period of time
-+ Compression:Extract high-dimensional spatiotemporal flow characteristics by dimensionality reduction of the flow field using the encoder of CAE
-+ Evolution:Learning the evolution of spatiotemporal characteristics of low dimensional spatial flow fields through LSTM and predicting the next moment
-+ Reconstruction:Restore the predicted low-dimensional features of the flow field to high-dimensional space through the decoder of CAE
-+ Output:Output the predicted results of the transient flow field at the next moment
-
-
-
-# Dataset
-
-+ Source: Numerical simulation flow field data of one-dimensional Sod shock tube, provided by Professor Yu Jian from the School of Aeronautic Science and Engineering, Beihang University
-
-+ Establishment method: The calculation status and establishment method of the dataset can be found in [paper](https://doi.org/10.13700/j.bh.1001-5965.2022.0085)
-
-+ Data description:
- + The coordinate range of the sod shock tube is \[0, 1\], and there is a thin film at x=0.5 in the middle. At the initial moment, remove the thin film in the middle of the shock tube and study the changes in gas density in the shock tube. The calculation time t ranges from \[0, 0.2\] and is divided into an average of 531 time steps. A total of 531 flow field snapshots, each with a matrix size of 256
- + The download address for the dataset is: https://download.mindspore.cn/mindscience/mindflow/dataset/applications/data_driven/cae-lstm/sod/sod.npy
-
-# Training process
-
-The model is trained by single machine and single card. According to the training task requirements, run cae_train.py and lstm_train.py to start training; Before training, relevant training conditions need to be set in config.yaml.
-
-+ python cae_train.py
-+ python lstm_train.py
-
-# Visualization of prediction results
-
-Run prediction.py according to the training conditions Post-processing operation:
-
-+ Based on the weight parameter file of the training results, predict the dimensionality reduction and reconstruction data of CAE, the evolution data of LSTM, and the flow field data;
-
-+ Calculate the average relative error of CAE reconstruction data and CAE-LSTM predicted flow field data separately;
-
-+ The default saving path is prediction_result.
-
-# Prediction result
-
-The following are the actual flow field, CAE-LSTM prediction results, and prediction errors:
-
-
-
-
-
-
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/README_CN.md b/MindFlow/applications/data_driven/cae_lstm/sod/README_CN.md
deleted file mode 100644
index 7f7b04baf14e5330f0db380ec0dce198a4707119..0000000000000000000000000000000000000000
--- a/MindFlow/applications/data_driven/cae_lstm/sod/README_CN.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-
-# 背景介绍
-
-降阶模型可有效降低使用CFD方法的设计成本和周期。对于复杂的可压缩流动,使用POD等线性方法进行流场降维,需要大量的模态才能保证流场重建的精度,而采用非线性降维方法能够有效减少所需模态数。卷积自编码器(CAE)是一种由编码器和解码器组成的神经网络,能够实现数据降维和重构,可看作是POD方法的非线性拓展。采用CAE进行流场数据的非线性降维,同时使用LSTM进行流场状态的时间演化。对于非定常可压缩流动,“CAE-LSTM”降阶模型能够在使用较少自由变量数的前提下获得较高的重构和预测精度。
-
-# 模型架构
-
-CAE-LSTM的基本框架主要基于[论文](https://doi.org/10.13700/j.bh.1001-5965.2022.0085),其由CAE和LSTM组成,其中CAE中的编码器降低时间序列流场的维数,实现特征提取,LSTM学习低维时空特征并进行预测,CAE中的解码器实现流场重建。
-
-+ 输入:输入一段时间的流场;
-+ 压缩:通过CAE的编码器对流场进行降维,提取高维时空流动特征;
-+ 演化:通过LSTM学习低维空间流场时空特征的演变,预测下一时刻;
-+ 重建:通过CAE的解码器将预测的流场低维特征恢复到高维空间;
-+ 输出:输出对下一时刻瞬态流场的预测结果。
-
-
-
-# 数据集
-
-+ 来源:一维Sod激波管的数值仿真流场数据,由北京航空航天大学航空科学与工程学院于剑副教授团队提供
-
-+ 建立方法:数据集计算状态与建立方法见[论文](https://doi.org/10.13700/j.bh.1001-5965.2022.0085)
-
-+ 数据说明:
- + Sod激波管坐标x范围为[0, 1],中间x=0.5处有一薄膜。在初始时刻,将激波管中间的薄膜撤去,研究激波管中气体密度的变化情况。计算时间t范围为[0, 0.2],平均分成531个时间步。
- + 数据集的下载地址为:https://download.mindspore.cn/mindscience/mindflow/dataset/applications/data_driven/cae-lstm/sod/sod.npy
-
-# 训练过程
-
-该模型单机单卡进行训练,根据训练任务需求,分别执行cae_train.py和lstm_train.py开始训练CAE和LSTM网络;
-在开始训练前需要在config.yaml中设置数据读取保存路径和训练参数等相关训练条件。
-
-+ python cae_train.py
-+ python lstm_train.py
-
-# 预测结果可视化
-
-根据训练条件,执行prediction.py;
-后处理操作:
-
-+ 根据训练结果的权重参数文件,预测输出CAE的降维、重构数据,LSTM的演化数据和CAE-LSTM预测的流场数据.npy文件;
-
-+ 分别计算CAE的重构数据和CAE-LSTM预测的流场数据的平均相对误差;
-
-+ 保存路径默认为:prediction_result。
-
-# 预测结果展示
-
-以下分别为真实流场,CAE-LSTM预测结果和预测误差:
-
-
-
-
-
-
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/config.yaml b/MindFlow/applications/data_driven/cae_lstm/sod/config.yaml
deleted file mode 100644
index b2deddf8eedbfb0f0df5c0febf3f378f7ecf4262..0000000000000000000000000000000000000000
--- a/MindFlow/applications/data_driven/cae_lstm/sod/config.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-cae_data:
- data_path: "./dataset/sod.npy"
- batch_size: 8
- time_size: 531 # The numbers of the snapshots
-cae_optimizer:
- lr: 0.001
- weight_decay: 0.000001
- epochs: 4400
- save_ckpt_interval: 400
- summary_dir: "./summary" # The directory where the training process files are saved
-cae_model:
- data_dimension: [256, 128, 64, 32, 16, 8]
- conv_kernel_size: 3
- maxpool_kernel_size: 2
- maxpool_stride: 2
- encoder_channels: [1, 40, 30, 20, 10, 5, 1]
- decoder_channels: [1, 1, 5, 10, 20, 30, 40, 1]
-lstm_data:
- batch_size: 4
- time_size: 531
- latent_size: 4
- time_window: 70
- gaussian_filter_sigma: 3
-lstm_optimizer:
- lr: 0.001
- weight_decay: 0.000001
- epochs: 4400
- save_ckpt_interval: 400
- summary_dir: "./summary"
-lstm_model:
- latent_size: 4 # The input size of LSTM
- hidden_size: 200
- num_layers: 2
-prediction:
- cae_ckpt_path: "./summary/ckpt/cae_4400.ckpt"
- lstm_ckpt_path: "./summary/ckpt/lstm_4000.ckpt"
- prediction_result_dir: "./prediction_result"
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/images/CAE-LSTM1.png b/MindFlow/applications/data_driven/cae_lstm/sod/images/CAE-LSTM1.png
deleted file mode 100644
index d6b37a4b485d4f41ece57fe0d5c4156a191b4425..0000000000000000000000000000000000000000
Binary files a/MindFlow/applications/data_driven/cae_lstm/sod/images/CAE-LSTM1.png and /dev/null differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/images/cae.gif b/MindFlow/applications/data_driven/cae_lstm/sod/images/cae.gif
deleted file mode 100644
index a67f69236071b1666b3f93cc4d99f472078b079e..0000000000000000000000000000000000000000
Binary files a/MindFlow/applications/data_driven/cae_lstm/sod/images/cae.gif and /dev/null differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/images/cae_lstm_error.png b/MindFlow/applications/data_driven/cae_lstm/sod/images/cae_lstm_error.png
deleted file mode 100644
index 625f3bb96d928dbe3a05d65604efe4126180beca..0000000000000000000000000000000000000000
Binary files a/MindFlow/applications/data_driven/cae_lstm/sod/images/cae_lstm_error.png and /dev/null differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/images/true.gif b/MindFlow/applications/data_driven/cae_lstm/sod/images/true.gif
deleted file mode 100644
index 988708f91f0f3ee412153f0bea5cbd32ad24be84..0000000000000000000000000000000000000000
Binary files a/MindFlow/applications/data_driven/cae_lstm/sod/images/true.gif and /dev/null differ
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/src/__init__.py b/MindFlow/applications/data_driven/cae_lstm/src/__init__.py
similarity index 93%
rename from MindFlow/applications/data_driven/cae_lstm/sod/src/__init__.py
rename to MindFlow/applications/data_driven/cae_lstm/src/__init__.py
index 3aaadc3e851f57cbc1450377af86c7ab9380695c..0ecb03afdf5afc2c320580305496325955a175db 100644
--- a/MindFlow/applications/data_driven/cae_lstm/sod/src/__init__.py
+++ b/MindFlow/applications/data_driven/cae_lstm/src/__init__.py
@@ -15,13 +15,14 @@
# ============================================================================
"""init"""
from .dataset import create_cae_dataset, create_lstm_dataset
-from .model import CaeNet, Lstm
+from .model import CaeNet1D, CaeNet2D, Lstm
from .postprocess import plot_train_loss, plot_cae_prediction, plot_cae_lstm_prediction
__all__ = [
"create_cae_dataset",
"create_lstm_dataset",
- "CaeNet",
+ "CaeNet1D",
+ "CaeNet2D",
"Lstm",
"plot_train_loss",
"plot_cae_prediction",
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/src/dataset.py b/MindFlow/applications/data_driven/cae_lstm/src/dataset.py
similarity index 100%
rename from MindFlow/applications/data_driven/cae_lstm/sod/src/dataset.py
rename to MindFlow/applications/data_driven/cae_lstm/src/dataset.py
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/src/model.py b/MindFlow/applications/data_driven/cae_lstm/src/model.py
similarity index 47%
rename from MindFlow/applications/data_driven/cae_lstm/sod/src/model.py
rename to MindFlow/applications/data_driven/cae_lstm/src/model.py
index 5d4839ec9eefe92776d0b5f57470afe8eb400ef0..6be982594ef776adea36dca21263d1315f31b746 100644
--- a/MindFlow/applications/data_driven/cae_lstm/sod/src/model.py
+++ b/MindFlow/applications/data_driven/cae_lstm/src/model.py
@@ -15,15 +15,16 @@
"""
cae-lstm model
"""
-from mindspore import nn, ops, float32
+import mindspore.common.dtype as mstype
+from mindspore import nn, ops
-class CaeEncoder(nn.Cell):
+class CaeEncoder1D(nn.Cell):
"""
encoder net
"""
def __init__(self, conv_kernel_size, maxpool_kernel_size, maxpool_stride, channels_encoder):
- super(CaeEncoder, self).__init__()
+ super(CaeEncoder1D, self).__init__()
self.conv1 = nn.Conv1d(channels_encoder[0], channels_encoder[1], conv_kernel_size,
has_bias=True, weight_init='HeUniform')
self.conv2 = nn.Conv1d(channels_encoder[1], channels_encoder[2], conv_kernel_size,
@@ -70,12 +71,12 @@ class CaeEncoder(nn.Cell):
return x
-class CaeDecoder(nn.Cell):
+class CaeDecoder1D(nn.Cell):
"""
decoder net
"""
def __init__(self, data_dimension, conv_kernel_size, channels_decoder):
- super(CaeDecoder, self).__init__()
+ super(CaeDecoder1D, self).__init__()
self.conv1 = nn.Conv1d(channels_decoder[0], channels_decoder[1], conv_kernel_size,
has_bias=True, weight_init='HeUniform')
self.conv2 = nn.Conv1d(channels_decoder[1], channels_decoder[2], conv_kernel_size,
@@ -138,15 +139,168 @@ class CaeDecoder(nn.Cell):
return x
-class CaeNet(nn.Cell):
+class CaeNet1D(nn.Cell):
"""
cae net
"""
def __init__(self, data_dimension, conv_kernel_size, maxpool_kernel_size, maxpool_stride,
channels_encoder, channels_decoder):
- super(CaeNet, self).__init__()
- self.encoder = CaeEncoder(conv_kernel_size, maxpool_kernel_size, maxpool_stride, channels_encoder)
- self.decoder = CaeDecoder(data_dimension, conv_kernel_size, channels_decoder)
+ super(CaeNet1D, self).__init__()
+ self.encoder = CaeEncoder1D(conv_kernel_size, maxpool_kernel_size, maxpool_stride, channels_encoder)
+ self.decoder = CaeDecoder1D(data_dimension, conv_kernel_size, channels_decoder)
+
+ def construct(self, x):
+        latent = self.encoder(x)
+        x = self.decoder(latent)
+ return x
+
+
+class CaeEncoder2D(nn.Cell):
+ """
+ encoder net
+ """
+ def __init__(self, conv_kernel_size, maxpool_kernel_size, maxpool_stride, channels_encoder, channels_dense):
+ super(CaeEncoder2D, self).__init__()
+ self.conv1 = nn.Conv2d(channels_encoder[0], channels_encoder[1], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+ self.conv2 = nn.Conv2d(channels_encoder[1], channels_encoder[2], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+ self.conv3 = nn.Conv2d(channels_encoder[2], channels_encoder[3], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+ self.conv4 = nn.Conv2d(channels_encoder[3], channels_encoder[4], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+ self.conv5 = nn.Conv2d(channels_encoder[4], channels_encoder[5], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+ self.conv6 = nn.Conv2d(channels_encoder[5], channels_encoder[6], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+
+ self.max_pool2d = nn.MaxPool2d(kernel_size=maxpool_kernel_size, stride=maxpool_stride)
+
+ self.relu = nn.ReLU()
+
+ self.flatten = ops.Flatten()
+
+ self.dense1 = nn.Dense(channels_dense[0], channels_dense[1], weight_init='HeUniform', activation='relu')
+ self.dense2 = nn.Dense(channels_dense[1], channels_dense[2], weight_init='HeUniform', activation='relu')
+ self.dense3 = nn.Dense(channels_dense[2], channels_dense[3], weight_init='HeUniform')
+
+ self.reshape = ops.Reshape()
+
+ def construct(self, x):
+ """
+ encoder construct
+ """
+ x = self.conv1(x)
+ x = self.relu(x)
+ x = self.max_pool2d(x)
+
+ x = self.conv2(x)
+ x = self.relu(x)
+ x = self.max_pool2d(x)
+
+ x = self.conv3(x)
+ x = self.relu(x)
+ x = self.max_pool2d(x)
+
+ x = self.conv4(x)
+ x = self.relu(x)
+ x = self.max_pool2d(x)
+
+ x = self.conv5(x)
+ x = self.relu(x)
+ x = self.max_pool2d(x)
+
+ x = self.conv6(x)
+ x = self.relu(x)
+ x = self.max_pool2d(x)
+
+ x = self.flatten(x)
+
+ x = self.dense1(x)
+ x = self.dense2(x)
+ x = self.dense3(x)
+ return x
+
+
+class CaeDecoder2D(nn.Cell):
+ """
+ decoder net
+ """
+ def __init__(self, data_dimension, conv_kernel_size, channels_decoder, channels_dense):
+ super(CaeDecoder2D, self).__init__()
+ self.dense1 = nn.Dense(channels_dense[3], channels_dense[2], weight_init='HeUniform', activation='relu')
+ self.dense2 = nn.Dense(channels_dense[2], channels_dense[1], weight_init='HeUniform', activation='relu')
+ self.dense3 = nn.Dense(channels_dense[1], channels_dense[0], weight_init='HeUniform', activation='relu')
+ self.reshape = ops.Reshape()
+ self.conv1 = nn.Conv2d(channels_decoder[0], channels_decoder[1], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+ self.conv2 = nn.Conv2d(channels_decoder[1], channels_decoder[2], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+ self.conv3 = nn.Conv2d(channels_decoder[2], channels_decoder[3], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+ self.conv4 = nn.Conv2d(channels_decoder[3], channels_decoder[4], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+ self.conv5 = nn.Conv2d(channels_decoder[4], channels_decoder[5], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+ self.conv6 = nn.Conv2d(channels_decoder[5], channels_decoder[6], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+ self.conv7 = nn.Conv2d(channels_decoder[6], channels_decoder[7], conv_kernel_size,
+ has_bias=True, weight_init='HeUniform')
+
+ self.relu = nn.ReLU()
+
+ self.data_dimension = data_dimension
+
+ self.channels_decoder = channels_decoder
+
+ def construct(self, x):
+ """
+ decoder construct
+ """
+ x = self.dense1(x)
+ x = self.dense2(x)
+ x = self.dense3(x)
+
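+        # Recover a square feature map from the dense output: the side length is
+        # sqrt(flattened_size / channels); the last dimension is inferred with -1.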
+ x = self.reshape(x, (ops.shape(x)[0], self.channels_decoder[0],
+ round(pow(ops.shape(x)[-1]/self.channels_decoder[0], 0.5)), -1))
+
+ x = self.conv1(x)
+ x = self.relu(x)
+ x = ops.ResizeNearestNeighbor((self.data_dimension[5], self.data_dimension[5]))(x)
+
+ x = self.conv2(x)
+ x = self.relu(x)
+ x = ops.ResizeNearestNeighbor((self.data_dimension[4], self.data_dimension[4]))(x)
+
+ x = self.conv3(x)
+ x = self.relu(x)
+ x = ops.ResizeNearestNeighbor((self.data_dimension[3], self.data_dimension[3]))(x)
+
+ x = self.conv4(x)
+ x = self.relu(x)
+ x = ops.ResizeNearestNeighbor((self.data_dimension[2], self.data_dimension[2]))(x)
+
+ x = self.conv5(x)
+ x = self.relu(x)
+ x = ops.ResizeNearestNeighbor((self.data_dimension[1], self.data_dimension[1]))(x)
+
+ x = self.conv6(x)
+ x = self.relu(x)
+ x = ops.ResizeNearestNeighbor((self.data_dimension[0], self.data_dimension[0]))(x)
+
+ x = self.conv7(x)
+ return x
+
+
+class CaeNet2D(nn.Cell):
+ """
+ cae net
+ """
+ def __init__(self, data_dimension, conv_kernel, maxpool_kernel, maxpool_stride,
+ channels_encoder, channels_decoder, channels_dense):
+ super(CaeNet2D, self).__init__()
+ self.encoder = CaeEncoder2D(conv_kernel, maxpool_kernel, maxpool_stride, channels_encoder, channels_dense)
+ self.decoder = CaeDecoder2D(data_dimension, conv_kernel, channels_decoder, channels_dense)
def construct(self, x):
lattent = self.encoder(x)
@@ -166,8 +320,8 @@ class Lstm(nn.Cell):
self.num_layers = num_layers
def construct(self, x):
- h0 = ops.zeros((self.num_layers, ops.shape(x)[0], self.hidden_size), float32)
- c0 = ops.zeros((self.num_layers, ops.shape(x)[0], self.hidden_size), float32)
+ h0 = ops.zeros((self.num_layers, ops.shape(x)[0], self.hidden_size), mstype.float32)
+ c0 = ops.zeros((self.num_layers, ops.shape(x)[0], self.hidden_size), mstype.float32)
x, _ = self.lstm(x, (h0, c0))
x = self.dense(x)
return x
diff --git a/MindFlow/applications/data_driven/cae_lstm/sod/src/postprocess.py b/MindFlow/applications/data_driven/cae_lstm/src/postprocess.py
similarity index 72%
rename from MindFlow/applications/data_driven/cae_lstm/sod/src/postprocess.py
rename to MindFlow/applications/data_driven/cae_lstm/src/postprocess.py
index 3502c07221332ebd563cd4020f185960216e62fc..cc864340ba3eb229fa27cfac8ed64a54a9332890 100644
--- a/MindFlow/applications/data_driven/cae_lstm/sod/src/postprocess.py
+++ b/MindFlow/applications/data_driven/cae_lstm/src/postprocess.py
@@ -42,19 +42,6 @@ def plot_cae_prediction(cae_encoded, cae_predict, true_data, plot_dir, time_size
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
- # cae_prediction
- plt.plot(true_data[100, 56:], 'k-', label='true, time=100')
- plt.plot(true_data[200, 56:], 'b-', label='true, time=200')
- plt.plot(true_data[530, 56:], 'r-', label='true, time=530')
- plt.plot(cae_predict[100, 56:], 'k--', label='cae_prediction, time=100')
- plt.plot(cae_predict[200, 56:], 'b--', label='cae_prediction, time=200')
- plt.plot(cae_predict[530, 56:], 'r--', label='cae_prediction, time=530')
- plt.ylabel('density')
- plt.xlabel('x')
- plt.legend()
- plt.savefig(f'{plot_dir}/cae_prediction.png')
- plt.close()
-
# relative_error
time_true = np.arange(0, time_size)
cae_error = np.zeros(time_size)
@@ -80,19 +67,6 @@ def plot_cae_lstm_prediction(lstm_latent, cae_lstm_predict, true_data, plot_dir,
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
- # cae_lstm_prediction
- plt.plot(true_data[100, 56:], 'k-', label='true, time=100')
- plt.plot(true_data[200, 56:], 'b-', label='true, time=200')
- plt.plot(true_data[530, 56:], 'r-', label='true, time=530')
- plt.plot(cae_lstm_predict[100-time_window, 56:], 'k--', label='cae_lstm, time=100')
- plt.plot(cae_lstm_predict[200-time_window, 56:], 'b--', label='cae_lstm, time=200')
- plt.plot(cae_lstm_predict[530-time_window, 56:], 'r--', label='cae_lstm, time=530')
- plt.ylabel('density')
- plt.xlabel('x')
- plt.legend()
- plt.savefig(f'{plot_dir}/cae_lstm_prediction.png')
- plt.close()
-
# relative_error
time_true = np.arange(0, time_size)
time_predict = time_true[time_window:]
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/README.md b/MindFlow/applications/data_driven/fluid_structure_interaction/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ca5edc4348b10455a84854d9027ab62529969765
--- /dev/null
+++ b/MindFlow/applications/data_driven/fluid_structure_interaction/README.md
@@ -0,0 +1,92 @@
+---
+
+# Modeling Method for Fluid-Structure Interaction Systems Based on Deep Neural Networks
+
+## Overview
+
+### Background
+
+Aircraft aeroelasticity is a typical fluid-structure interaction (FSI) problem that studies the coupling between the aircraft structure and the aerodynamic loads acting on it. High-fidelity computational fluid dynamics (CFD) can accurately simulate the evolution of the flow field around the structure and hence the loads on it, but the huge number of grid cells makes the computation expensive.
+As a result, FSI simulation based on a coupled CFD/computational structural dynamics (CSD) strategy is time-consuming, especially in the early design stage where repeated design iterations are required. Many researchers therefore try to build data-driven flow field evolution models that deliver fast, high-accuracy flow predictions and improve the simulation efficiency of the coupled system.
+In recent years, deep neural networks, with their strong nonlinear learning and deep feature extraction abilities, have been applied successfully to many flow field modeling problems.
+Among them, flow field reconstruction builds a mapping from the geometry and flow conditions to the flow quantities at spatial points, enabling fast prediction of different flow fields; it has attracted much attention because it can quickly provide the current flow field state.
+In this work, the neural network model is coupled with the computational structural dynamics equations to model the FSI system; the network structure and data organization are further improved to obtain more accurate flow field predictions and, in turn, more accurate FSI response predictions.
+
+### Method
+
+The traditional numerical simulation framework for fluid-structure interaction consists of two parts: a computational fluid dynamics (CFD) solver and a computational structural dynamics (CSD) solver. The two solvers advance the fluid and structural states to the next time step in the fluid and solid domains respectively, and exchange information at the interface as the input for the next step. The coupling process is shown in the figure below.
+The deep-neural-network-based FSI modeling framework proposed here adopts the same strategy: the deep neural network replaces the CFD solver to predict the flow field evolution, while the structural response is still computed by the CSD solver. The structural displacement and the surface pressure of the flow field are exchanged between the neural network and the CSD solver, as sketched in the code below.
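+
+The partitioned coupling loop can be summarized with the following minimal Python sketch. It is illustrative only: `flow_net` stands in for the trained deep neural network and `csd_step` for the structural solver, and both names and signatures are assumptions made for this example rather than the project's actual API.
+
+```python
+def couple(flow_net, csd_step, flow_state, displacement, n_steps):
+    """Alternate between the flow-field network and the structural solver."""
+    history = []
+    for _ in range(n_steps):
+        # 1) the network replaces the CFD solver: advance the flow field one step
+        #    and extract the pressure on the fluid-structure interface
+        flow_state, surface_pressure = flow_net(flow_state, displacement)
+        # 2) the CSD solver advances the structure using the interface pressure
+        displacement = csd_step(surface_pressure, displacement)
+        history.append((flow_state, displacement))
+    return history
+
+
+# toy stand-ins so the sketch can be executed directly
+demo = couple(lambda f, d: (f, 0.5 * d), lambda p, d: d + 0.1 * p,
+              flow_state=1.0, displacement=0.0, n_steps=3)
+print(demo[-1])
+```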
+
+
+
+
+
+The basic framework of the hybrid deep neural network (HDNN) model for fluid-structure interaction used here mainly follows previous work ([paper](https://link.springer.com/article/10.1007/s10409-021-01129-4)):
+the prediction method for fluid-structure-coupled unsteady flow based on a hybrid deep neural network is composed of a convolutional neural network (CNN), a convolutional long short-term memory network (ConvLSTM), and a deconvolutional neural network (DeCNN). The CNN reduces the dimensionality of the time-series flow field and extracts features; the ConvLSTM learns and predicts the low-dimensional spatio-temporal features; finally, the DeCNN reconstructs the predicted flow field. A minimal sketch follows the list below.
+
++ Input layer: the current flow field state and the boundary conditions;
++ Convolutional layer: captures the spatial features of the flow field and reduces their dimensionality; using low-dimensional flow field features to predict the flow field evolution improves computational efficiency;
++ LSTM layer: predicts the flow field features at the next moment from the captured current flow field features and the structural motion;
++ Deconvolution output layer: restores the low-dimensional features of the predicted flow field to the high-dimensional
+  space, reconstructs the transient flow field at the next moment through multi-layer DeCNN, and outputs the visual
+  prediction results.
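+
+As a rough illustration of this encoder-predictor-decoder layout, the MindSpore sketch below builds a toy model. It is not the `AEnet` used in this repository: for brevity the ConvLSTM is replaced by a plain `nn.LSTM` acting on globally pooled latent features, and every layer size is an assumption chosen only to make the example run.
+
+```python
+import mindspore as ms
+from mindspore import nn, ops
+
+
+class TinyHDNN(nn.Cell):
+    """Conv encoder -> LSTM over time -> deconv decoder (toy example)."""
+    def __init__(self, in_channels=3, latent=8, hidden=32):
+        super().__init__()
+        self.latent, self.hidden = latent, hidden
+        self.encoder = nn.SequentialCell(
+            nn.Conv2d(in_channels, 16, 4, stride=2, pad_mode="same", has_bias=True), nn.ReLU(),
+            nn.Conv2d(16, latent, 4, stride=2, pad_mode="same", has_bias=True), nn.ReLU())
+        self.lstm = nn.LSTM(input_size=latent, hidden_size=hidden, num_layers=1, batch_first=True)
+        self.proj = nn.Dense(hidden, latent)
+        self.decoder = nn.SequentialCell(
+            nn.Conv2dTranspose(latent, 16, 4, stride=2, pad_mode="same", has_bias=True), nn.ReLU(),
+            nn.Conv2dTranspose(16, in_channels, 4, stride=2, pad_mode="same", has_bias=True))
+
+    def construct(self, x):
+        # x: (batch, time, channels, height, width)
+        b, t, c, h, w = x.shape
+        feat = self.encoder(x.reshape(b * t, c, h, w))              # spatial feature extraction
+        hh, ww = feat.shape[2], feat.shape[3]
+        pooled = feat.mean(axis=(2, 3)).reshape(b, t, self.latent)  # one latent vector per frame
+        h0 = ops.zeros((1, b, self.hidden), ms.float32)
+        c0 = ops.zeros((1, b, self.hidden), ms.float32)
+        seq, _ = self.lstm(pooled, (h0, c0))                        # temporal prediction
+        z = self.proj(seq[:, -1]).reshape(b, self.latent, 1, 1)     # latent code of the next frame
+        z = ops.broadcast_to(z, (b, self.latent, hh, ww))
+        return self.decoder(z)                                      # reconstructed next flow field
+
+
+net = TinyHDNN()
+frames = ops.ones((2, 4, 3, 32, 32), ms.float32)   # two samples, four past snapshots, 3 channels
+print(net(frames).shape)                           # -> (2, 3, 32, 32)
+```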
+
+
+
+
+
+### Dataset
+
++ Dataset directory: the root directory of the fluid-structure interaction dataset is FSI, under which there are six subdirectories named Ur*i*, where *i* is 4.0, 4.5, 5.0, 5.5, 6.0 or 6.5 and denotes the reduced velocity. The reduced velocity is a dimensionless quantity describing the ratio of the structural motion velocity to the characteristic fluid velocity, and it is commonly used to analyze and compare the motion of bodies in different fluid systems. Each subdirectory contains total_puv.mat and velocity.mat, which store the flow field pressure/velocity data and the cylinder motion velocity, respectively (see the loading sketch after this list)
+
++ Source: the flow field is computed by CFD simulation; specifically, ANSYS Fluent is used to obtain numerical
+  flow field data for the unsteady two-dimensional flow around a cylinder
+
++ Establishment method: the moving structure (a cylinder) in the flow field undergoes one-dimensional simple harmonic motion in the vertical direction. The two-dimensional cylinder flow field is modeled physically, the mesh is discretized/partitioned, and the governing equations are solved with the Reynolds-averaged simulation method to obtain the flow field information. The flow field quantities are non-dimensionalized and grid sampling points are placed in the sampling region to obtain a sample set for training and testing
+
++ Specification: each flow field snapshot in the dataset contains three channels, representing the pressure distribution, horizontal velocity, and vertical velocity of the flow field
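+
+A minimal sketch for inspecting one case of the dataset is shown below. The subdirectory name `Ur5.5` and the variable names inside the .mat files are assumptions based on the description above; adjust them to whatever `loadmat` actually reports.
+
+```python
+import os
+from scipy.io import loadmat
+
+case_dir = os.path.join("./FSI", "Ur5.5")                  # one of the six reduced-velocity cases
+flow = loadmat(os.path.join(case_dir, "total_puv.mat"))    # pressure and u/v velocity snapshots
+motion = loadmat(os.path.join(case_dir, "velocity.mat"))   # cylinder motion velocity
+
+for name, mat in (("total_puv.mat", flow), ("velocity.mat", motion)):
+    keys = [k for k in mat if not k.startswith("__")]      # skip the loadmat header entries
+    print(name, "->", {k: mat[k].shape for k in keys})
+```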
+
+### Effect
+
+Given only the initial flow field state and the cylinder position, the coupled model can predict the entire fluid-structure interaction evolution process. The structural response predicted by the coupled model agrees well with that simulated by the traditional method, a high-resolution flow field is produced at every time step, and the prediction is 20 times faster than that of the traditional method.
+
+## Quick Start
+
+### Training Method 1: Call the `train.py` script on the command line
+
+python train.py --config_file_path ./config.yaml --data_list ['5.0', '5.5', '6.0', '6.5'] --batch_size 32 --mode GRAPH --save_graphs False --save_graphs_path ./summary --device_target Ascend --device_id 0
+
+where:
+`--config_file_path` represents the parameter and path control file, default './config.yaml';
+
+`--data_list` represents the dataset used for training, defaults ['5.0', '5.5', '6.0', '6.5'];
+
+`--batch_size` represents the number of images that will be sent to the network during each training, default 32;
+
+`--mode` represents the running mode: 'GRAPH' indicates the static graph mode and 'PYNATIVE' indicates the dynamic graph (PyNative) mode, default 'GRAPH';
+
+`--save_graphs` represents whether to save the calculation graph, default 'False';
+
+`--save_graphs_path` represents the path where the calculation graph is saved, default './summary';
+
+`--device_target` represents the type of computing platform used, which can be selected as 'Ascend' or 'GPU', default 'Ascend';
+
+`--device_id` represents the calculation card number used, which can be filled in according to the actual situation, default 0
+
+### Training Method 2: Running Jupyter Notebook
+
+You can run the training and validation code line by line using either the [Chinese version](fluid_structure_interaction_CN.ipynb) or the [English version](fluid_structure_interaction.ipynb) of the Jupyter Notebook.
+
+## Results Display
+
+The following figure shows the flow fields predicted by the deep neural network at different times within one cycle. Given only the initial flow field state and the cylinder position, the coupled model completes the prediction of the entire fluid-structure interaction evolution process.
+
+
+
+## Contributor
+
+gitee id:[DUJiaoxi](https://gitee.com/ddd000g)
+
+email: dujiaoxi@stu.xjtu.edu.cn
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/config.yaml b/MindFlow/applications/data_driven/fluid_structure_interaction/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..137b6311a6b0a56913dd86e47d60bf63d8e07f8d
--- /dev/null
+++ b/MindFlow/applications/data_driven/fluid_structure_interaction/config.yaml
@@ -0,0 +1,22 @@
+data:
+  data_dir: "./FSI"  # directory where the dataset is stored
+ time_steps: 1 # previous time steps used to predict
+
+model:
+ in_channels: 3 # the number of channels in the input space
+ num_layers: 12 # the number of Convolutional and DeConvolutional layer
+ kernel_size: 4 # the size of kernel in Convolutional and DeConvolutional layer
+ num_convlstm_layers: 1 # the number of ConvLSTM layers
+
+optimizer:
+ lr: 0.001 # learning rate
+ epochs: 200 # the number of training epochs
+ eval_interval: 10 # time interval for model evaluation
+ save_ckpt_interval: 10 # save the model once for 10 epochs
+ ckpt_dir: "./ckpt_dir" # the directory where the model files are saved
+
+prediction:
+ data_list: [ "4.5" ] # prediction dataset list
+ ckpt_path: "./ckpt_dir/net_200.ckpt" # the path of ckpt file used in prediction process
+  pred_continue_dir: "./save_prediction_continue"  # directory where prediction_data.mat is stored
+  save_prediction_dir: "./save_prediction_dir"  # directory where prediction_v_d.mat is stored
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/eval.py b/MindFlow/applications/data_driven/fluid_structure_interaction/eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..ceb924ec04921eb8bbf297fe194077bf8bfc1bf5
--- /dev/null
+++ b/MindFlow/applications/data_driven/fluid_structure_interaction/eval.py
@@ -0,0 +1,157 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Prediction process"""
+import os
+import time
+import argparse
+import numpy as np
+from scipy.io import savemat
+
+from mindspore import nn, ops, Tensor, load_checkpoint, dataset, load_param_into_net, set_seed
+from mindflow.utils import load_yaml_config
+
+from src import my_test_dataset, AEnet
+
+np.random.seed(0)
+set_seed(0)
+
+
+def prediction():
+ """Process of predict with trained net"""
+ # prepare params
+ config = load_yaml_config(args.config_file_path)
+ data_params = config["data"]
+ model_params = config["model"]
+    prediction_params = config["prediction"]
+ pred_continue_dir = prediction_params["pred_continue_dir"]
+ save_prediction_dir = prediction_params["save_prediction_dir"]
+
+ # prepare network
+ net = AEnet(in_channels=model_params["in_channels"],
+ num_layers=model_params["num_layers"],
+ kernel_size=model_params["kernel_size"],
+ num_convlstm_layers=model_params["num_convlstm_layers"])
+ m_state_dict = load_checkpoint(prediction_params["ckpt_path"])
+ load_param_into_net(net, m_state_dict)
+
+ # prepare dataset
+ data_set, surf_xy = my_test_dataset(data_params["data_dir"], data_params["time_steps"],
+ prediction_params["data_list"])
+
+ test_dataset = dataset.GeneratorDataset(data_set, ["input", "velocity", "ur", "label"], shuffle=False)
+ test_dataset = test_dataset.batch(batch_size=1, drop_remainder=True)
+
+ if not os.path.exists(pred_continue_dir):
+ os.mkdir(pred_continue_dir)
+ if not os.path.exists(save_prediction_dir):
+ os.mkdir(save_prediction_dir)
+
+ # prepare loss function: MSE loss function
+ loss_func = nn.MSELoss()
+
+ # predicted loss
+ test_losses = []
+
+ test_v = []
+ test_y = []
+ test_lift = []
+ test_total = []
+ real_y = []
+
+ predict = []
+ real = []
+
+ for i, (inputvar, velocityvar, urvar, targetvar) in enumerate(test_dataset):
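+        # Autoregressive rollout: step 0 uses the true initial flow field and velocity;
+        # later steps feed the previous prediction back in as the network input, while the
+        # velocity comes from the structural update at the end of the loop body.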
+ if i == 0:
+ inputs = inputvar
+ y = np.max(surf_xy[0, :, 1]) - 1.55
+ velocity = velocityvar
+ else:
+ inputs = ops.operations.ExpandDims()(pred, 1)
+
+ real_y.append(np.max(surf_xy[2 * i, :, 1]) - 1.55)
+
+ pred = net(inputs, velocity, urvar)
+
+ loss = loss_func(inputs, pred)
+ loss_aver = loss.asnumpy().item()
+
+        # record the prediction error for this step
+ test_losses.append(loss_aver)
+ print(f"test loss: {loss_aver:.6f}")
+
+ surf_x = surf_xy[0, :, 0]
+
+ # output flow field matrix
+ real.append(targetvar.numpy())
+ predict.append(pred.numpy())
+
+ # Integrate lift based on predicted surface pressure and calculate cylindrical velocity
+ m_cylinder = Tensor(0.011775)
+ k_spring = Tensor(2.29327)
+ d_t = Tensor(0.02)
+
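+        # The cylinder is treated as a one-degree-of-freedom mass-spring system
+        # (mass m_cylinder, stiffness k_spring): the surface pressure is integrated around
+        # the closed contour with the trapezoidal rule to obtain the lift, then the
+        # displacement y and the velocity are advanced by one Euler step of size d_t.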
+ surf_p = pred[0, 0, :, 0] * (1.0 * 1.0 * 1.0)
+ sum_p = 0.0
+
+ for j in range(127):
+ sum_p = sum_p + (surf_p[j] + surf_p[j + 1]) * (surf_x[j] - surf_x[j + 1]) * 0.5
+ sum_p = sum_p + (surf_p[127] + surf_p[0]) * (surf_x[127] - surf_x[0]) * 0.5
+
+ y = y + d_t * velocity
+ y = Tensor(y.astype(np.float32))
+
+ force_total = sum_p - y * k_spring
+
+ velocity = velocity + d_t * force_total / m_cylinder
+
+ # output velocity, lift force
+ test_v.append(velocity.numpy())
+ test_y.append(y.numpy())
+ test_lift.append(sum_p.numpy())
+ test_total.append(force_total.numpy())
+
+ savemat(f"{pred_continue_dir}/prediction_data.mat", {'predict': predict,
+ 'real': real,
+ 'surf_x': surf_x})
+
+ savemat(f"{save_prediction_dir}/prediction_v_d.mat", {'test_v': test_v,
+ 'test_y': test_y,
+ 'test_lift': test_lift,
+ 'test_total': test_total,
+ 'real_y': real_y})
+
+ print(f"mean test loss: {np.mean(test_losses):.6f}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="cylinder around flow ROM")
+
+ parser.add_argument("--config_file_path", type=str, default="./config.yaml")
+ parser.add_argument("--mode", type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"],
+ help="Context mode, support 'GRAPH', 'PYNATIVE'")
+ parser.add_argument("--device_target", type=str, default="GPU", choices=["GPU", "Ascend"],
+ help="The target device to run, support 'Ascend', 'GPU'")
+ parser.add_argument("--device_id", type=int, default=0, help="ID of the target device")
+
+
+ args = parser.parse_args()
+
+ print("Process ID:", os.getpid())
+ print(f"device id: {args.device_id}")
+ start_time = time.time()
+ prediction()
+ print(f"End-to-End total time: {(time.time() - start_time):.2f}s")
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/fluid_structure_interaction.ipynb b/MindFlow/applications/data_driven/fluid_structure_interaction/fluid_structure_interaction.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..af16754e5fc7ffd1a4616f292b00fabfc966236e
--- /dev/null
+++ b/MindFlow/applications/data_driven/fluid_structure_interaction/fluid_structure_interaction.ipynb
@@ -0,0 +1,468 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "fda19887",
+ "metadata": {},
+ "source": [
+ "\n",
+ "# Modeling method of Fluid-structure interaction system based on deep neural network\n",
+ "\n",
+ "## Overview\n",
+ "\n",
+ "Aeroelastic problem of aircraft is a typical Fluid–structure interaction (FSI) problem, which studies the coupling relationship between aircraft structure and aerodynamic force. High accuracy Computational fluid dynamics (CFD) technology can accurately simulate the evolution process of the flow field around the structure to obtain the force situation of the structure, but the huge number of grids leads to high computing costs. Many researchers try to use the data-driven method to build the flow field evolution model to achieve rapid prediction of the flow field with high accuracy, so as to improve the simulation efficiency of the Fluid–structure interaction system. In recent years, the rapidly developing deep neural network technology relies on its powerful nonlinear learning ability and deep feature capture ability, and has achieved many successful applications in flow field modeling problems. Among them, flow field reconstruction achieves rapid prediction of different flow fields by constructing a mapping model between geometric shapes and flow conditions to flow field information at spatial points, which is highly concerned for its ability to quickly provide the current flow field state.\n",
+ "\n",
+ "In order to efficiently solve the flow field reconstruction of the Fluid–structure interaction problem, this paper coupled the neural network model with the computational structural dynamic equation, realized the modeling of the Fluid–structure interaction system, further improved the neural network structure, optimized the data structure, so as to obtain more accurate flow field prediction results and achieve more accurate Fluid–structure interaction response prediction.\n",
+ "\n",
+ "## Problem description\n",
+ "\n",
+ "The traditional Fluid–structure interaction numerical simulation framework consists of a Computational fluid dynamics solver and a computational Solid mechanics solver. The two solvers solve the state of the fluid and structure at the next moment in the fluid domain and the solid domain respectively, and transmit information at the interface as the input for the next calculation. The coupling process is shown in the following figure. The Fluid–structure interaction modeling framework based on the depth neural network proposed in this paper still uses the same strategy. The framework uses the depth neural network instead of the CFD solver to predict the flow field evolution. The structural response is still calculated by the CSD solver. The structural displacement and flow field surface pressure are transferred between the depth neural network and the computational Solid mechanics solver.\n",
+ "\n",
+ "## Technology path\n",
+ "\n",
+ "The specific process of mindflow to solve this problem is as follows:\n",
+ "\n",
+ "1.Create data sets based on CFD numerical simulation results.\n",
+ "\n",
+ "2.The model is built using mindspire deep learning framework.\n",
+ "\n",
+ "3.Define the optimizer and loss function.\n",
+ "\n",
+ "4.Use mindspire's instant compilation to accelerate model training.\n",
+ "\n",
+ "5.Use the trained model for reasoning and visualization."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "14af9033",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "39552eaf",
+ "metadata": {},
+ "source": [
+ "## Model Architecture\n",
+ "\n",
+ "The basic framework of HDNN consists of convolutional neural network (CNN), convolutional long short-term memory network (ConvLSTM) and deconvolution neural network (DeCNN). CNN reduces the dimensionality of the time series flow field and achieves feature extraction; ConvLSTM learns low dimensional spatiotemporal features and makes predictions; Finally, DeCNN achieves reconstruction of predicted flow fields\n",
+ "\n",
+ "+ Input layer: current flow field state and boundary conditions\n",
+ "+ Convolutional layer: Capturing the spatial features of the flow field and reducing its dimensionality, using low dimensional flow field features to predict flow field evolution can improve computational efficiency\n",
+ "+ LSTM layer: predicts the flow field characteristics of the next moment based on the captured current flow field characteristics and structural motion conditions\n",
+ "+ Deconvolution output layer: Restores the low-dimensional features of the predicted flow field to high-dimensional space, reconstructs the transient flow field at the next moment through multi-layer DeCNN, and outputs visual prediction results"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "901d5c41",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7f317de2",
+ "metadata": {},
+ "source": [
+ "## Training dataset\n",
+ "\n",
+ "The dataset is constructed from multidimensional matrix flow field snapshot matrix constructed from numerical simulation of unsteady two-dimensional cylindrical flow field data\n",
+ "\n",
+ "+ The moving structure (cylinder) in the flow field makes one-dimensional Simple harmonic motion in the vertical direction. Physical modeling of two-dimensional cylindrical flow field, mesh discretization/partitioning, and solving control equations using Reynolds time averaged simulation method to obtain flow field information. Dimensionalize the physical quantities of the flow field and place grid sampling points in the sampling area to obtain a sample set for training and testing\n",
+ "+ Each flow field snapshot contains three channels, representing the pressure distribution information, horizontal velocity information, and vertical velocity information of the flow field\n",
+ "+ Dataset:[Download location](https://download.mindspore.cn/mindscience/mindflow/dataset/applications/data_driven/fluid_structure_interaction/)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "5ce12042",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import time\n",
+ "import argparse\n",
+ "import numpy as np\n",
+ "\n",
+ "from mindspore import nn, ops, context, save_checkpoint, set_seed, data_sink, jit\n",
+ "from mindflow.utils import load_yaml_config\n",
+ "\n",
+ "from src import generate_dataset, AEnet, save_loss_curve"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4e7406dd",
+ "metadata": {},
+ "source": [
+ "## Training environment\n",
+ "\n",
+ "+ The training adopts the static graphical model of Mindspot framework (GRAPH)\n",
+ "+ Train on CPU, GPU, or Ascend"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "f5c6d767",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "set_seed(0)\n",
+ "np.random.seed(0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bbd5ca2c",
+ "metadata": {},
+ "source": [
+ "## Training hyperparameter\n",
+ "\n",
+ "Obtain hyperparameters for models, data, and optimizers from config"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "7f20dd1f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "parser = argparse.ArgumentParser(description=\"cylinder around flow ROM\")\n",
+ "\n",
+ "parser.add_argument(\"--mode\", type=str, default=\"GRAPH\", choices=[\"GRAPH\", \"PYNATIVE\"],\n",
+ " help=\"Context mode, support 'GRAPH', 'PYNATIVE'\")\n",
+ "parser.add_argument(\"--save_graphs\", type=bool, default=False, choices=[True, False],\n",
+ " help=\"Whether to save intermediate compilation graphs\")\n",
+ "parser.add_argument(\"--save_graphs_path\", type=str, default=\"./summary\")\n",
+ "parser.add_argument(\"--device_target\", type=str, default=\"Ascend\", choices=[\"GPU\", \"Ascend\"],\n",
+ " help=\"The target device to run, support 'GPU','Ascend'\")\n",
+ "parser.add_argument(\"--device_id\", type=int, default=0, help=\"ID of the target device\")\n",
+ "parser.add_argument(\"--data_list\", type=list, default=['5.0', '5.5', '6.0', '6.5'], help=\"The type for training\")\n",
+ "parser.add_argument('--batch_size', type=int, default=32, help=\"batch size\")\n",
+ "parser.add_argument(\"--config_file_path\", type=str, default=\"./config.yaml\")\n",
+ "\n",
+ "args = parser.parse_args()\n",
+ "\n",
+ "context.set_context(mode=context.GRAPH_MODE if args.mode.upper().startswith(\"GRAPH\") else context.PYNATIVE_MODE,\n",
+ " save_graphs=args.save_graphs, save_graphs_path=args.save_graphs_path,\n",
+ " device_target=args.device_target, device_id=args.device_id)\n",
+ "use_ascend = context.get_context(attr_key='device_target') == \"Ascend\"\n",
+ "\n",
+ "config = load_yaml_config(args.config_file_path)\n",
+ "data_params = config[\"data\"]\n",
+ "model_params = config[\"model\"]\n",
+ "optimizer_params = config[\"optimizer\"]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8e53d5ec",
+ "metadata": {},
+ "source": [
+ "## Training process file save path\n",
+ "\n",
+ "Save the trained model file in a folder every certain number of training sessions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "aa53aed1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ckpt_dir = optimizer_params[\"ckpt_dir\"]\n",
+ "if not os.path.exists(ckpt_dir):\n",
+ " os.mkdir(ckpt_dir)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "505908fc",
+ "metadata": {},
+ "source": [
+ "## Constructing neural network and optimizer\n",
+ "\n",
+ "The convolutional layer of the neural network has a total of 12 layers, ConvLSTM has 1 layer, and deconvolution has a total of 12 layers\n",
+ "\n",
+ "The Loss function uses the Mean squared error Loss function, and the optimizer uses the Adam (Adaptive Moment Estimation) optimization algorithm"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "37e0f61b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = AEnet(in_channels=model_params[\"in_channels\"],\n",
+ " num_layers=model_params[\"num_layers\"],\n",
+ " kernel_size=model_params[\"kernel_size\"],\n",
+ " num_convlstm_layers=model_params[\"num_convlstm_layers\"])\n",
+ "\n",
+ "loss_func = nn.MSELoss()\n",
+ "optimizer = nn.Adam(params=model.trainable_params(), learning_rate=optimizer_params[\"lr\"])\n",
+ "if use_ascend:\n",
+ " from mindspore.amp import DynamicLossScaler, auto_mixed_precision, all_finite\n",
+ " loss_scaler = DynamicLossScaler(1024, 2, 100)\n",
+ " auto_mixed_precision(model, 'O1')\n",
+ "else:\n",
+ " loss_scaler = None"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "89d32ff9",
+ "metadata": {},
+ "source": [
+ "## Training framework\n",
+ "\n",
+ "Define the forward propagation function forward_ Fn, compare the predicted value with the true value to obtain the loss value and return it"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "9864f41d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def forward_fn(inputs, velocity, ur, label):\n",
+ " pred = model(inputs, velocity, ur)\n",
+ " loss = loss_func(pred, label)\n",
+ "\n",
+ " if use_ascend:\n",
+ " loss = loss_scaler.scale(loss)\n",
+ " return loss\n",
+ "\n",
+ "grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "faacf783",
+ "metadata": {},
+ "source": [
+ "## Dataset loading\n",
+ "\n",
+ "To generate_dataset parameter transfer to obtain training and validation datasets"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "dbe1356d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(f\"==================Load data sample ===================\")\n",
+ "dataset_train, dataset_eval = generate_dataset(data_params[\"data_dir\"],\n",
+ " data_params[\"time_steps\"],\n",
+ " args.data_list)\n",
+ "print(f\"======================End Load========================\\n\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9da7331a",
+ "metadata": {},
+ "source": [
+ "## Data sink and model training\n",
+ "\n",
+ "Define train_ Step and Eval_ Step and use data_ Sink acceleration training, output the loss value and usage time during the training process, and save the model file every certain training round"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "e692f9ba",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(f\"====================Start train=======================\")\n",
+ "@jit\n",
+ "def train_step(inputs, velocity, ur, label):\n",
+ " loss, grads = grad_fn(inputs, velocity, ur, label)\n",
+ " if use_ascend:\n",
+ " loss = loss_scaler.unscale(loss)\n",
+ " if all_finite(grads):\n",
+ " grads = loss_scaler.unscale(grads)\n",
+ " loss = ops.depend(loss, optimizer(grads))\n",
+ " return loss\n",
+ "\n",
+ "@jit\n",
+ "def eval_step(inputs, velocity, ur, label):\n",
+ " loss = forward_fn(inputs, velocity, ur, label)\n",
+ " loss = ops.sqrt(loss)\n",
+ " return loss\n",
+ "\n",
+ "train_sink_process = data_sink(train_step, dataset_train, sink_size=1)\n",
+ "eval_sink_process = data_sink(eval_step, dataset_eval, sink_size=1)\n",
+ "train_data_size, eval_data_size = dataset_train.get_dataset_size(), dataset_eval.get_dataset_size()\n",
+ "\n",
+ "avg_train_losses = []\n",
+ "avg_valid_losses = []\n",
+ "\n",
+ "for epoch in range(1, optimizer_params[\"epochs\"] + 1):\n",
+ " train_losses = 0\n",
+ " valid_losses = 0\n",
+ "\n",
+ " local_time_beg = time.time()\n",
+ " model.set_train(True)\n",
+ "\n",
+ " for _ in range(train_data_size):\n",
+ " step_train_loss = ops.squeeze(train_sink_process(), axis=())\n",
+ " step_train_loss = step_train_loss.asnumpy().item()\n",
+ " train_losses += step_train_loss\n",
+ "\n",
+ " train_loss = train_losses / train_data_size\n",
+ " avg_train_losses.append(train_loss)\n",
+ "\n",
+ " print(f\"epoch: {epoch}, epoch average train loss: {train_loss :.6f}, \"\n",
+ " f\"epoch time: {(time.time() - local_time_beg):.2f}s\")\n",
+ "\n",
+ " if epoch % optimizer_params[\"eval_interval\"] == 0:\n",
+ " print(f\"=================Start Evaluation=====================\")\n",
+ "\n",
+ " eval_time_beg = time.time()\n",
+ " model.set_train(False)\n",
+ " for _ in range(eval_data_size):\n",
+ " step_eval_loss = ops.squeeze(eval_sink_process(), axis=())\n",
+ " step_eval_loss = step_eval_loss.asnumpy().item()\n",
+ " valid_losses += step_eval_loss\n",
+ "\n",
+ " valid_loss = valid_losses / eval_data_size\n",
+ " avg_valid_losses.append(valid_loss)\n",
+ "\n",
+ " print(f\"epoch: {epoch}, epoch average valid loss: {valid_loss :.6f}, \"\n",
+ " f\"epoch time: {(time.time() - eval_time_beg):.2f}s\")\n",
+ " print(f\"==================End Evaluation======================\")\n",
+ "\n",
+ " if epoch % optimizer_params[\"save_ckpt_interval\"] == 0:\n",
+ " save_checkpoint(model, f\"{ckpt_dir}/net_{epoch}.ckpt\")\n",
+ "\n",
+ "save_loss_curve(avg_train_losses, 'Epoch', 'avg_train_losses', 'Avg_train_losses Curve', 'Avg_train_losses.png')\n",
+ "save_loss_curve(avg_valid_losses, 'Epoch', 'avg_valid_losses', 'Avg_valid_losses Curve', 'Avg_valid_losses.png')\n",
+ "\n",
+ "print(f\"=====================End train========================\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b1262b22",
+ "metadata": {},
+ "source": [
+ "## Set training conditions for parameter transmission\n",
+ "\n",
+ "When running the file, pass in the necessary parameters through the parameter parser to start training, and print the process and device id, as well as the total training time"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "d26ff8ed",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if __name__ == \"__main__\":\n",
+ " print(\"Process ID:\", os.getpid())\n",
+ " print(f\"device id: {args.device_id}\")\n",
+ " start_time = time.time()\n",
+ " train()\n",
+ " print(f\"End-to-End total time: {(time.time() - start_time):.2f}s\")"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "4bcfdbdd",
+ "metadata": {},
+ "source": [
+ "Process ID: 529681\n",
+ "device id: 0\n",
+ "==================Load data sample ===================\n",
+ "======================End Load========================\n",
+ "\n",
+ "====================Start train=======================\n",
+ "epoch: 1, epoch average train loss: 0.092563, epoch time: 60.48s\n",
+ "epoch: 2, epoch average train loss: 0.033426, epoch time: 39.88s\n",
+ "epoch: 3, epoch average train loss: 0.009924, epoch time: 33.47s\n",
+ "epoch: 4, epoch average train loss: 0.003757, epoch time: 34.95s\n",
+ "epoch: 5, epoch average train loss: 0.002671, epoch time: 34.38s\n",
+ "epoch: 6, epoch average train loss: 0.002416, epoch time: 38.55s\n",
+ "epoch: 7, epoch average train loss: 0.001826, epoch time: 38.72s\n",
+ "epoch: 8, epoch average train loss: 0.001770, epoch time: 35.42s\n",
+ "epoch: 9, epoch average train loss: 0.001415, epoch time: 36.65s\n",
+ "epoch: 10, epoch average train loss: 0.001385, epoch time: 35.20s\n",
+ "=================Start Evaluation=====================\n",
+ "epoch: 10, epoch average valid loss: 0.033140, epoch time: 10.51s\n",
+ "==================End Evaluation======================\n",
+ "\n",
+ "...\n",
+ "\n",
+ "epoch: 191, epoch average train loss: 0.000208, epoch time: 38.77s\n",
+ "epoch: 192, epoch average train loss: 0.000159, epoch time: 39.22s\n",
+ "epoch: 193, epoch average train loss: 0.000320, epoch time: 38.57s\n",
+ "epoch: 194, epoch average train loss: 0.000156, epoch time: 39.06s\n",
+ "epoch: 195, epoch average train loss: 0.000164, epoch time: 39.48s\n",
+ "epoch: 196, epoch average train loss: 0.000175, epoch time: 39.90s\n",
+ "epoch: 197, epoch average train loss: 0.000210, epoch time: 38.63s\n",
+ "epoch: 198, epoch average train loss: 0.000178, epoch time: 38.70s\n",
+ "epoch: 199, epoch average train loss: 0.000246, epoch time: 34.93s\n",
+ "epoch: 200, epoch average train loss: 0.000165, epoch time: 35.63s\n",
+ "=================Start Evaluation=====================\n",
+ "epoch: 200, epoch average valid loss: 0.011407, epoch time: 9.19s\n",
+ "==================End Evaluation======================\n",
+ "=====================End train========================\n",
+ "End-to-End total time: 7694.45s"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "25aac646",
+ "metadata": {},
+ "source": [
+ "## Visualization of predicted flow field results\n",
+ "\n",
+ "+ Moving boundary flow field prediction starts by executing eval.py. The coupled model can complete the prediction task of the entire Fluid–structure interaction evolution process under the condition that only the initial flow field state and cylinder position are given\n",
+ "+ The following figure shows the flow field prediction status of a fully trained HDNN model for a deep neural network at different times within a cycle"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "50e40259",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.16"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/fluid_structure_interaction_CN.ipynb b/MindFlow/applications/data_driven/fluid_structure_interaction/fluid_structure_interaction_CN.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..2ca35c184b8979bc2e2a1535983715fe3726e75d
--- /dev/null
+++ b/MindFlow/applications/data_driven/fluid_structure_interaction/fluid_structure_interaction_CN.ipynb
@@ -0,0 +1,468 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "fda19887",
+ "metadata": {},
+ "source": [
+ "\n",
+ "# 基于深度神经网络的流固耦合系统建模方法\n",
+ "\n",
+ "## 概述\n",
+ "\n",
+ "飞行器气动弹性问题是研究飞行器结构与气动力相互耦合关系,是一种典型的流固耦合(fluid-structure interaction, FSI)问题。高精确度计算流体力学技术(CFD)能够准确地模拟结构周围流场演化过程从而获得结构受力情况,但是数量巨大的网格导致计算成本极高。许多研究者尝试使用数据驱动的方法构建流场演化模型,实现较高精度流场的快速预测,从而提高流固耦合系统模拟效率。近年来迅速发展的深度神经网络技术依赖于其强大的非线性学习能力以及深度特征捕捉能力,在流场建模问题中已经取得了诸多成功应用。其中流场重构通过构建几何形状和流动工况至空间点处的流场信息之间的映射模型,实现不同流场的快速预测,因其能快速给出当前流场状态而备受关注。\n",
+ "\n",
+ "为了高效解决流固耦合问题的流场重构,本文将神经网络模型与计算结构动力学方程耦合,实现了流固耦合系统的建模,进一步改进神经网络结构,优化数据结构,从而获得更高精度的流场预测结果,实现更准确的流固耦合响应预测。\n",
+ "\n",
+ "## 问题描述\n",
+ "\n",
+ "传统的流固耦合数值仿真框架由计算流体力学求解器和计算固体力学求解器两部分组成,两个求解器分别在流体域和固体域求解下一时刻流体和结构的状态,并在交界面进行信息传递作为下一步计算的输入,耦合过程如下图所示。本文提出的基于深度神经网络的流固耦合建模框架仍然采用相同的策略,该框架使用深度神经网络代替CFD求解器来预测流场演化,结构响应仍由CSD求解器计算得到,结构位移和流场表面压力在深度神经网络和计算固体力学求解器之间传递。\n",
+ "\n",
+ "## 技术路径\n",
+ "\n",
+ "MindFlow求解该问题的具体流程如下:\n",
+ "\n",
+ "1.根据CFD数值模拟结果创建数据集。\n",
+ "\n",
+ "2.使用MindSpore深度学习框架构建模型。\n",
+ "\n",
+ "3.定义优化器与损失函数。\n",
+ "\n",
+ "4.使用MindSpore的即时编译等加速模型训练。\n",
+ "\n",
+ "5.利用训练好的模型进行推理和可视化。"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "14af9033",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "39552eaf",
+ "metadata": {},
+ "source": [
+ "## 模型架构\n",
+ "\n",
+ "HDNN的基本框架由卷积神经网络(CNN)、卷积长短期记忆网络(ConvLSTM)和反卷积神经网络(DeCNN)组成。CNN降低了时间序列流场的维数,实现特征提取;ConvLSTM学习低维时空特征并进行预测;最后,DeCNN实现预测流场的重建\n",
+ "\n",
+ "+ 输入层:当前流场状态和边界条件;\n",
+ "+ 卷积层:捕获流场的空间特征并降低维数,使用低维流场特征预测流场演化可以提高计算效率;\n",
+ "+ LSTM层:根据捕获的当前时刻流场特征和结构运动条件预测下一时刻的流场特征;\n",
+ "+ 反卷积输出层:将预测流场的低维特征恢复到高维空间,通过多层DeCNN重构下一时刻的瞬态流场,并输出可视化预测结果"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "901d5c41",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7f317de2",
+ "metadata": {},
+ "source": [
+ "## 训练数据集\n",
+ "\n",
+ "数据集由非定常二维圆柱绕流的数值仿真流场数据构建的多维矩阵流场快照矩阵构建而成\n",
+ "\n",
+ "+ 流场中的运动结构(圆柱)在竖直方向做一维简谐运动。对二维圆柱流场物理建模、网格离散/划分并采用雷诺时均模拟方法求解控制方程获取流场信息。将流场物理量无量纲化,并在采样区域中放置网格采样点,获得用于训练和测试的样本集\n",
+ "+ 每张流场快照包含3个通道,代表流场的压强分布信息、水平速度信息、竖直速度信息\n",
+ "+ 数据集:[下载位置](https://download.mindspore.cn/mindscience/mindflow/dataset/applications/data_driven/fluid_structure_interaction/)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "5ce12042",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import time\n",
+ "import argparse\n",
+ "import numpy as np\n",
+ "\n",
+ "from mindspore import nn, ops, context, save_checkpoint, set_seed, data_sink, jit\n",
+ "from mindflow.utils import load_yaml_config\n",
+ "\n",
+ "from src import generate_dataset, AEnet, save_loss_curve"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4e7406dd",
+ "metadata": {},
+ "source": [
+ "## 训练环境\n",
+ "\n",
+ "+ 训练采用Mindspore框架的静态图模式(GRAPH)\n",
+ "+ 在CPU、GPU或Ascend进行训练"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "f5c6d767",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "set_seed(0)\n",
+ "np.random.seed(0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bbd5ca2c",
+ "metadata": {},
+ "source": [
+ "## 训练超参数\n",
+ "\n",
+ "从config中获得模型、数据、优化器的超参"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "7f20dd1f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "parser = argparse.ArgumentParser(description=\"cylinder around flow ROM\")\n",
+ "\n",
+ "parser.add_argument(\"--mode\", type=str, default=\"GRAPH\", choices=[\"GRAPH\", \"PYNATIVE\"],\n",
+ " help=\"Context mode, support 'GRAPH', 'PYNATIVE'\")\n",
+ "parser.add_argument(\"--save_graphs\", type=bool, default=False, choices=[True, False],\n",
+ " help=\"Whether to save intermediate compilation graphs\")\n",
+ "parser.add_argument(\"--save_graphs_path\", type=str, default=\"./summary\")\n",
+ "parser.add_argument(\"--device_target\", type=str, default=\"Ascend\", choices=[\"GPU\", \"Ascend\"],\n",
+ " help=\"The target device to run, support 'GPU','Ascend'\")\n",
+ "parser.add_argument(\"--device_id\", type=int, default=0, help=\"ID of the target device\")\n",
+ "parser.add_argument(\"--data_list\", type=list, default=['5.0', '5.5', '6.0', '6.5'], help=\"The type for training\")\n",
+ "parser.add_argument('--batch_size', type=int, default=32, help=\"batch size\")\n",
+ "parser.add_argument(\"--config_file_path\", type=str, default=\"./config.yaml\")\n",
+ "\n",
+ "args = parser.parse_args()\n",
+ "\n",
+ "context.set_context(mode=context.GRAPH_MODE if args.mode.upper().startswith(\"GRAPH\") else context.PYNATIVE_MODE,\n",
+ " save_graphs=args.save_graphs, save_graphs_path=args.save_graphs_path,\n",
+ " device_target=args.device_target, device_id=args.device_id)\n",
+ "use_ascend = context.get_context(attr_key='device_target') == \"Ascend\"\n",
+ "\n",
+ "config = load_yaml_config(args.config_file_path)\n",
+ "data_params = config[\"data\"]\n",
+ "model_params = config[\"model\"]\n",
+ "optimizer_params = config[\"optimizer\"]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8e53d5ec",
+ "metadata": {},
+ "source": [
+ "## 训练过程文件保存路径\n",
+ "\n",
+ "将训练好的模型文件每隔一定训练次数保存在文件夹下"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "aa53aed1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ckpt_dir = optimizer_params[\"ckpt_dir\"]\n",
+ "if not os.path.exists(ckpt_dir):\n",
+ " os.mkdir(ckpt_dir)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "505908fc",
+ "metadata": {},
+ "source": [
+ "## 构建神经网络及优化器\n",
+ "\n",
+ "神经网络的卷积层共有12层,ConvLSTM有1层,反卷积共有12层\n",
+ "\n",
+ "损失函数使用均方误差(Mean Squared Error)损失函数,优化器使用Adam(Adaptive Moment Estimation)优化算法"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "37e0f61b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = AEnet(in_channels=model_params[\"in_channels\"],\n",
+ " num_layers=model_params[\"num_layers\"],\n",
+ " kernel_size=model_params[\"kernel_size\"],\n",
+ " num_convlstm_layers=model_params[\"num_convlstm_layers\"])\n",
+ "\n",
+ "loss_func = nn.MSELoss()\n",
+ "optimizer = nn.Adam(params=model.trainable_params(), learning_rate=optimizer_params[\"lr\"])\n",
+ "if use_ascend:\n",
+ " from mindspore.amp import DynamicLossScaler, auto_mixed_precision, all_finite\n",
+ " loss_scaler = DynamicLossScaler(1024, 2, 100)\n",
+ " auto_mixed_precision(model, 'O1')\n",
+ "else:\n",
+ " loss_scaler = None"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "89d32ff9",
+ "metadata": {},
+ "source": [
+ "## 训练框架\n",
+ "\n",
+ "定义前向传播函数forward_fn,将预测值和真值比较得到损失值loss并返回"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "9864f41d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def forward_fn(inputs, velocity, ur, label):\n",
+ " pred = model(inputs, velocity, ur)\n",
+ " loss = loss_func(pred, label)\n",
+ "\n",
+ " if use_ascend:\n",
+ " loss = loss_scaler.scale(loss)\n",
+ " return loss\n",
+ "\n",
+ "grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "faacf783",
+ "metadata": {},
+ "source": [
+ "## 数据集加载\n",
+ "\n",
+ "给generate_dataset传参,得到训练数据集和验证数据集"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "dbe1356d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(f\"==================Load data sample ===================\")\n",
+ "dataset_train, dataset_eval = generate_dataset(data_params[\"data_dir\"],\n",
+ " data_params[\"time_steps\"],\n",
+ " args.data_list)\n",
+ "print(f\"======================End Load========================\\n\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9da7331a",
+ "metadata": {},
+ "source": [
+ "## 数据下沉及模型训练\n",
+ "\n",
+ "定义train_step和eval_step并使用data_sink加速训练,输出训练过程的损失值和使用时间,并每隔一定训练轮次保存模型文件"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "e692f9ba",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(f\"====================Start train=======================\")\n",
+ "@jit\n",
+ "def train_step(inputs, velocity, ur, label):\n",
+ " loss, grads = grad_fn(inputs, velocity, ur, label)\n",
+ " if use_ascend:\n",
+ " loss = loss_scaler.unscale(loss)\n",
+ " if all_finite(grads):\n",
+ " grads = loss_scaler.unscale(grads)\n",
+ " loss = ops.depend(loss, optimizer(grads))\n",
+ " return loss\n",
+ "\n",
+ "@jit\n",
+ "def eval_step(inputs, velocity, ur, label):\n",
+ " loss = forward_fn(inputs, velocity, ur, label)\n",
+ " loss = ops.sqrt(loss)\n",
+ " return loss\n",
+ "\n",
+ "train_sink_process = data_sink(train_step, dataset_train, sink_size=1)\n",
+ "eval_sink_process = data_sink(eval_step, dataset_eval, sink_size=1)\n",
+ "train_data_size, eval_data_size = dataset_train.get_dataset_size(), dataset_eval.get_dataset_size()\n",
+ "\n",
+ "avg_train_losses = []\n",
+ "avg_valid_losses = []\n",
+ "\n",
+ "for epoch in range(1, optimizer_params[\"epochs\"] + 1):\n",
+ " train_losses = 0\n",
+ " valid_losses = 0\n",
+ "\n",
+ " local_time_beg = time.time()\n",
+ " model.set_train(True)\n",
+ "\n",
+ " for _ in range(train_data_size):\n",
+ " step_train_loss = ops.squeeze(train_sink_process(), axis=())\n",
+ " step_train_loss = step_train_loss.asnumpy().item()\n",
+ " train_losses += step_train_loss\n",
+ "\n",
+ " train_loss = train_losses / train_data_size\n",
+ " avg_train_losses.append(train_loss)\n",
+ "\n",
+ " print(f\"epoch: {epoch}, epoch average train loss: {train_loss :.6f}, \"\n",
+ " f\"epoch time: {(time.time() - local_time_beg):.2f}s\")\n",
+ "\n",
+ " if epoch % optimizer_params[\"eval_interval\"] == 0:\n",
+ " print(f\"=================Start Evaluation=====================\")\n",
+ "\n",
+ " eval_time_beg = time.time()\n",
+ " model.set_train(False)\n",
+ " for _ in range(eval_data_size):\n",
+ " step_eval_loss = ops.squeeze(eval_sink_process(), axis=())\n",
+ " step_eval_loss = step_eval_loss.asnumpy().item()\n",
+ " valid_losses += step_eval_loss\n",
+ "\n",
+ " valid_loss = valid_losses / eval_data_size\n",
+ " avg_valid_losses.append(valid_loss)\n",
+ "\n",
+ " print(f\"epoch: {epoch}, epoch average valid loss: {valid_loss :.6f}, \"\n",
+ " f\"epoch time: {(time.time() - eval_time_beg):.2f}s\")\n",
+ " print(f\"==================End Evaluation======================\")\n",
+ "\n",
+ " if epoch % optimizer_params[\"save_ckpt_interval\"] == 0:\n",
+ " save_checkpoint(model, f\"{ckpt_dir}/net_{epoch}.ckpt\")\n",
+ "\n",
+ "save_loss_curve(avg_train_losses, 'Epoch', 'avg_train_losses', 'Avg_train_losses Curve', 'Avg_train_losses.png')\n",
+ "save_loss_curve(avg_valid_losses, 'Epoch', 'avg_valid_losses', 'Avg_valid_losses Curve', 'Avg_valid_losses.png')\n",
+ "\n",
+ "print(f\"=====================End train========================\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b1262b22",
+ "metadata": {},
+ "source": [
+ "## 设置训练条件 传参\n",
+ "\n",
+ "当运行该文件时,通过参数解析器传入必要参数,开始训练,并打印进程和设备id,以及训练总时间"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "d26ff8ed",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if __name__ == \"__main__\":\n",
+ " print(\"Process ID:\", os.getpid())\n",
+ " print(f\"device id: {args.device_id}\")\n",
+ " start_time = time.time()\n",
+ " train()\n",
+ " print(f\"End-to-End total time: {(time.time() - start_time):.2f}s\")"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "4bcfdbdd",
+ "metadata": {},
+ "source": [
+ "Process ID: 529681\n",
+ "device id: 0\n",
+ "==================Load data sample ===================\n",
+ "======================End Load========================\n",
+ "\n",
+ "====================Start train=======================\n",
+ "epoch: 1, epoch average train loss: 0.092563, epoch time: 60.48s\n",
+ "epoch: 2, epoch average train loss: 0.033426, epoch time: 39.88s\n",
+ "epoch: 3, epoch average train loss: 0.009924, epoch time: 33.47s\n",
+ "epoch: 4, epoch average train loss: 0.003757, epoch time: 34.95s\n",
+ "epoch: 5, epoch average train loss: 0.002671, epoch time: 34.38s\n",
+ "epoch: 6, epoch average train loss: 0.002416, epoch time: 38.55s\n",
+ "epoch: 7, epoch average train loss: 0.001826, epoch time: 38.72s\n",
+ "epoch: 8, epoch average train loss: 0.001770, epoch time: 35.42s\n",
+ "epoch: 9, epoch average train loss: 0.001415, epoch time: 36.65s\n",
+ "epoch: 10, epoch average train loss: 0.001385, epoch time: 35.20s\n",
+ "=================Start Evaluation=====================\n",
+ "epoch: 10, epoch average valid loss: 0.033140, epoch time: 10.51s\n",
+ "==================End Evaluation======================\n",
+ "\n",
+ "...\n",
+ "\n",
+ "epoch: 191, epoch average train loss: 0.000208, epoch time: 38.77s\n",
+ "epoch: 192, epoch average train loss: 0.000159, epoch time: 39.22s\n",
+ "epoch: 193, epoch average train loss: 0.000320, epoch time: 38.57s\n",
+ "epoch: 194, epoch average train loss: 0.000156, epoch time: 39.06s\n",
+ "epoch: 195, epoch average train loss: 0.000164, epoch time: 39.48s\n",
+ "epoch: 196, epoch average train loss: 0.000175, epoch time: 39.90s\n",
+ "epoch: 197, epoch average train loss: 0.000210, epoch time: 38.63s\n",
+ "epoch: 198, epoch average train loss: 0.000178, epoch time: 38.70s\n",
+ "epoch: 199, epoch average train loss: 0.000246, epoch time: 34.93s\n",
+ "epoch: 200, epoch average train loss: 0.000165, epoch time: 35.63s\n",
+ "=================Start Evaluation=====================\n",
+ "epoch: 200, epoch average valid loss: 0.011407, epoch time: 9.19s\n",
+ "==================End Evaluation======================\n",
+ "=====================End train========================\n",
+ "End-to-End total time: 7694.45s"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "25aac646",
+ "metadata": {},
+ "source": [
+ "## 预测流场结果可视化\n",
+ "\n",
+ "+ 动边界流场预测通过执行eval.py开始预测,耦合模型可在仅给定初始流场状态和圆柱位置情况下,完成整个流固耦合演化过程的预测任务\n",
+ "+ 下图为训练完备的HDNN模型实现对一个周期内不同时刻深度神经网络的流场预测状态"
+ ]
+ },
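+ {
+ "cell_type": "markdown",
+ "id": "7c9f1a2e",
+ "metadata": {},
+ "source": [
+ "The cell below is a minimal, illustrative sketch of single-step prediction with the trained model, using only the interfaces defined in `src` (`AEnet`, `my_test_dataset`); the checkpoint path, data path, `time_steps` value and `dataset_list` entry are placeholders rather than values taken from this notebook:\n",
+ "\n",
+ "```python\n",
+ "import numpy as np\n",
+ "from mindspore import Tensor, load_checkpoint, load_param_into_net\n",
+ "from src import AEnet, my_test_dataset\n",
+ "\n",
+ "# hyperparameter values and paths below are placeholders\n",
+ "net = AEnet(in_channels=3, num_layers=12, kernel_size=4, num_convlstm_layers=1)\n",
+ "load_param_into_net(net, load_checkpoint(\"./ckpt_dir/net_200.ckpt\"))\n",
+ "\n",
+ "test_dataset, surf_xy = my_test_dataset(\"./dataset\", 16, [\"5.0\"])\n",
+ "sample = test_dataset[0]\n",
+ "inputs = Tensor(np.expand_dims(sample.test_input, 0))       # (1, T, C, H, W)\n",
+ "velocity = Tensor(np.expand_dims(sample.test_velocity, 0))  # add a batch dimension\n",
+ "ur = Tensor(np.expand_dims(sample.test_ur, 0))\n",
+ "pred = net(inputs, velocity, ur)                            # predicted next-step flow field\n",
+ "```"
+ ]
+ },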
+ {
+ "cell_type": "markdown",
+ "id": "50e40259",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.16"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/images/HDNN.jpg b/MindFlow/applications/data_driven/fluid_structure_interaction/images/HDNN.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..737ef8ae32069e0d902e0bbea4147c1c727f39df
Binary files /dev/null and b/MindFlow/applications/data_driven/fluid_structure_interaction/images/HDNN.jpg differ
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/images/p1.png b/MindFlow/applications/data_driven/fluid_structure_interaction/images/p1.png
new file mode 100644
index 0000000000000000000000000000000000000000..ccccb79dd28c2a6e8ec59827cf3cb96215c2666d
Binary files /dev/null and b/MindFlow/applications/data_driven/fluid_structure_interaction/images/p1.png differ
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/images/pred_cycle_puv.jpg b/MindFlow/applications/data_driven/fluid_structure_interaction/images/pred_cycle_puv.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..839c03ecc65d2e3ead40aefa6566d3e1b405f695
Binary files /dev/null and b/MindFlow/applications/data_driven/fluid_structure_interaction/images/pred_cycle_puv.jpg differ
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/src/__init__.py b/MindFlow/applications/data_driven/fluid_structure_interaction/src/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..64aa1f57e8f6e4d78af549f3acedebc6caf9192d
--- /dev/null
+++ b/MindFlow/applications/data_driven/fluid_structure_interaction/src/__init__.py
@@ -0,0 +1,27 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""init"""
+from .dataset import TrainDatasetSource, generate_dataset, my_test_dataset
+from .model import AEnet
+from .utils import save_loss_curve
+
+__all__ = [
+ "TrainDatasetSource",
+ "generate_dataset",
+ "my_test_dataset",
+ "AEnet",
+ "save_loss_curve"
+]
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/src/conv_lstm.py b/MindFlow/applications/data_driven/fluid_structure_interaction/src/conv_lstm.py
new file mode 100644
index 0000000000000000000000000000000000000000..94c5f67511f46734b9b0498c4acfeb4dc4e4757a
--- /dev/null
+++ b/MindFlow/applications/data_driven/fluid_structure_interaction/src/conv_lstm.py
@@ -0,0 +1,200 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""hybrid deep neural network structure"""
+from mindspore import nn, ops, numpy, float32
+
+
+class ConvLSTMCell(nn.Cell):
+ """
+ The cell of ConvLSTM, which sequentially processes the input through convolution, batch normalization and the LSTM gate operations
+ """
+
+ def __init__(self, input_dim, hidden_dim, kernel_size, bias):
+ """
+ Initialize ConvLSTM cell.
+ Parameters
+ ----------
+ input_dim: int
+ Number of channels of input tensor.
+ hidden_dim: int
+ Number of channels of hidden state.
+ kernel_size: (int, int)
+ Size of the convolutional kernel.
+ bias: bool
+ Whether or not to add the bias.
+ """
+ super(ConvLSTMCell, self).__init__()
+
+ self.input_dim = input_dim
+ self.hidden_dim = hidden_dim
+ self.kernel_size = kernel_size
+ self.bias = bias
+
+ self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
+ out_channels=4 * self.hidden_dim,
+ kernel_size=self.kernel_size,
+ stride=1,
+ pad_mode="same",
+ padding=0,
+ has_bias=self.bias,
+ data_format="NCHW")
+ self.norm = nn.BatchNorm2d(4 * self.hidden_dim)
+
+ def construct(self, input_tensor, cur_state):
+ """
+ Concatenate input_tensor with the current hidden state, apply convolution and batch normalization, then perform the LSTM gate operations
+ """
+ h_cur, c_cur = cur_state
+
+ combined = ops.concat(input_x=(input_tensor, h_cur), axis=1)
+ combined_conv = self.conv(combined)
+ combined_conv = self.norm(combined_conv)
+ cc_i, cc_f, cc_o, cc_g = ops.split(input_x=combined_conv, axis=1, output_num=4)
+
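+ # gates of the standard ConvLSTM update: input (i), forget (f), output (o) and candidate state (g)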
+ i = ops.sigmoid(cc_i)
+ f = ops.sigmoid(cc_f)
+ o = ops.sigmoid(cc_o)
+ g = ops.tanh(cc_g)
+
+ c_next = f * c_cur + i * g
+ h_next = o * ops.tanh(c_next)
+
+ return h_next, c_next
+
+ def init_hidden(self, batch_size, image_size, h_ini, c_ini):
+ """
+ Initialize the hidden state and cell state tensors for the first time step.
+ Parameters
+ ----------
+ batch_size: int
+ Mini-batch size of the training samples
+ image_size: tuple of size [H, W]
+ Height and width of the input feature maps
+ h_ini: Tensor
+ Per-sample value used to fill the initial hidden state
+ c_ini: Tensor
+ Per-sample value used to fill the initial cell state
+ """
+ height, width = image_size
+ h_ini = numpy.reshape(h_ini, (batch_size, 1, 1, 1))
+ h_ini = numpy.broadcast_to(h_ini, (batch_size, self.hidden_dim, height, width))
+
+ c_ini = numpy.reshape(c_ini, (batch_size, 1, 1, 1))
+ c_ini = numpy.broadcast_to(c_ini, (batch_size, self.hidden_dim, height, width))
+
+ init_h = h_ini * numpy.ones(shape=(batch_size, self.hidden_dim, height, width)).astype(float32)
+ init_c = c_ini * numpy.ones(shape=(batch_size, self.hidden_dim, height, width)).astype(float32)
+
+ return (init_h, init_c)
+
+
+class ConvLSTM(nn.Cell):
+ """
+ Parameters:
+ input_dim: Number of channels in input
+ hidden_dim: Number of hidden channels
+ kernel_size: Size of kernel in convolutions
+ num_layers: Number of LSTM layers stacked on each other
+ batch_first: Whether or not dimension 0 is the batch or not
+ bias: Bias or no bias in Convolution
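+
+ Example (illustrative; mirrors how model.AEnet instantiates ConvLSTM):
+ >> convlstm = ConvLSTM(input_dim=128, hidden_dim=128, kernel_size=(3, 3), num_layers=1, batch_first=True)
+ >> # x: (B, T, C, H, W) encoded features; h0, c0: per-sample initial hidden/cell values
+ >> h_last = convlstm(x, h0, c0)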
+ """
+
+ def __init__(self, input_dim, hidden_dim, kernel_size, num_layers, batch_first=False, bias=True):
+ super(ConvLSTM, self).__init__()
+
+ self._check_kernel_size_consistency(kernel_size)
+
+ kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
+ hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
+
+ if not len(kernel_size) == len(hidden_dim) == num_layers:
+ raise ValueError("Inconsistent list length.")
+
+ self.input_dim = input_dim
+ self.hidden_dim = hidden_dim
+ self.kernel_size = kernel_size
+ self.num_layers = num_layers
+ self.batch_first = batch_first
+ self.bias = bias
+
+ cell_list = []
+ for i in range(0, self.num_layers):
+ cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
+ cell_list.append(ConvLSTMCell(input_dim=cur_input_dim,
+ hidden_dim=self.hidden_dim[i],
+ kernel_size=self.kernel_size[i],
+ bias=self.bias))
+ self.cell_list = nn.CellList(cell_list)
+
+ @staticmethod
+ def _check_kernel_size_consistency(kernel_size):
+ """Detect the input kernel_ Does the size meet the requirements and require a kernel_size is list or tuple"""
+ if not (isinstance(kernel_size, tuple) or
+ (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
+ raise ValueError("'kernel_size' must be tuple or list of tuples")
+
+ @staticmethod
+ def _extend_for_multilayer(param, num_layers):
+ """Expanding to multi-layer LSTM scenarios"""
+ if not isinstance(param, list):
+ param = [param] * num_layers
+ return param
+
+ def construct(self, input_tensor, h0, c0):
+ """
+ Parameters
+ :param input_tensor: 5-D Tensor either of shape (t, b, c, h, w) or (b, t, c, h, w)
+ :param h0: initial hidden state
+ :param c0: initial cell status
+ """
+ if not self.batch_first:
+ input_tensor = input_tensor.permute(1, 0, 2, 3, 4)
+
+ b, _, _, h, w = input_tensor.shape
+ hidden_state = self._init_hidden(batch_size=b, image_size=(h, w), h_ini=h0, c_ini=c0)
+
+ layer_output_list = []
+ last_state_list = []
+
+ seq_len = input_tensor.shape[1]
+ cur_layer_input = input_tensor
+
+ for layer_idx in range(self.num_layers):
+ h, c = hidden_state[layer_idx]
+ output_inner = []
+
+ for t in range(seq_len):
+ h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :], cur_state=[h, c])
+ output_inner.append(h)
+
+ layer_output = ops.stack(output_inner, axis=1)
+ cur_layer_input = layer_output
+
+ layer_output_list.append(layer_output)
+ last_state_list.append([h, c])
+
+ return h
+
+ def _init_hidden(self, batch_size, image_size, h_ini, c_ini):
+ """
+ Initialize the input state 0 of the first timestamp of all LSTM layers
+ Parameters
+ ----------
+ batch_size: int
+ Minimum batch size of trained samples
+ image_size: tuple of size[H,W]
+ Height and width of data images
+ """
+ init_states = []
+ for i in range(self.num_layers):
+ init_states.append(self.cell_list[i].init_hidden(batch_size, image_size, h_ini, c_ini))
+ return init_states
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/src/dataset.py b/MindFlow/applications/data_driven/fluid_structure_interaction/src/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2d0bcc64c3891eacdb3791fb7239e8562ea799d
--- /dev/null
+++ b/MindFlow/applications/data_driven/fluid_structure_interaction/src/dataset.py
@@ -0,0 +1,181 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Loading data and create dataset"""
+from collections import namedtuple
+
+import numpy as np
+import h5py
+
+import mindspore.dataset as ds
+
+
+class TrainDatasetSource:
+ """
+ Indexing the dataset based on data_dir and dataset_list, processing the dataset and returning train_dataset,
+ train_velocity, train_ur, valid_dataset, valid_velocity and valid_ur.
+ Parameters:
+ data_dir: Path address of the dataset
+ dataset_list: The train data list:['5.0', '5.5', '6.0', '6.5']
+ """
+
+ def __init__(self, data_dir, dataset_list, ratio=0.8):
+ self.data_dir = data_dir
+ self.dataset_list = dataset_list
+ self.ratio = ratio
+
+ def train_data(self):
+ """ data for train"""
+ train_dataset = []
+ valid_dataset = []
+ train_velocity = []
+ valid_velocity = []
+ train_ur = []
+ valid_ur = []
+
+ for i in self.dataset_list:
+ data_source = h5py.File(f"{self.data_dir}/Ur{i}/total_puv.mat")
+ data_sample = data_source['total_puv'][:, :, :, 2:]
+ data_sample = np.array(data_sample.transpose([0, 3, 1, 2]), np.float32)
+
+ data_length = data_sample.shape[0]
+ train_dataset.append(data_sample[0:int(data_length * self.ratio)])
+ valid_dataset.append(data_sample[int(data_length * self.ratio):])
+
+ data_source = h5py.File(f"{self.data_dir}/ur{i}/velocity.mat")
+ data_velocity = data_source['velocity'][:]
+ data_velocity = np.array(data_velocity, np.float32)
+
+ train_velocity.append(data_velocity[0:int(data_length * self.ratio)])
+ valid_velocity.append(data_velocity[int(data_length * self.ratio):])
+
+ ur = np.array(float(i), np.float32)
+ ur_01 = ur / 10.0
+ data_ur = ur_01 * np.ones(data_length, dtype=np.float32)
+
+ train_ur.append(data_ur[0:int(data_length * self.ratio)])
+ valid_ur.append(data_ur[int(data_length * self.ratio):])
+
+ DatasetResult = namedtuple('DatasetResult',
+ ['train_dataset', 'train_velocity', 'train_ur', 'valid_dataset', 'valid_velocity',
+ 'valid_ur'])
+
+ return DatasetResult(train_dataset, train_velocity, train_ur, valid_dataset, valid_velocity, valid_ur)
+
+
+class TrainDatasetMake:
+ """
+ Build the training dataset from dataset, velocity, ur and time_steps so that samples can be retrieved by index.
+ Parameters:
+ dataset: Train data and valid data
+ velocity: The speed of the moving structure
+ ur: Calculation conditions used as frequency
+ time_steps: The number of time steps to predict
+ """
+
+ def __init__(self, dataset, velocity, ur, time_steps, dataset_list):
+ self.dataset = dataset
+ self.velocity = velocity
+ self.ur = ur
+ self.time_steps = time_steps
+ self.dataset_numbers = len(dataset_list)
+
+ def __len__(self):
+ return (len(self.dataset[0]) - 2 * self.time_steps) * self.dataset_numbers
+
+ def __getitem__(self, idx):
+ idx_dataset = idx // (len(self.dataset[0]) - 2 * self.time_steps)
+ idx = idx % (len(self.dataset[0]) - 2 * self.time_steps)
+
+ train_input = self.dataset[idx_dataset][idx:idx + 2 * self.time_steps:2]
+ train_velocity = self.velocity[idx_dataset][idx + 2 * (self.time_steps - 1)]
+ train_ur = self.ur[idx_dataset][idx + 2 * (self.time_steps - 1)]
+ train_label = self.dataset[idx_dataset][idx + 2 * self.time_steps]
+
+ TrainDatasetResult = namedtuple('TrainDatasetResult',
+ ['train_input', 'train_velocity', 'train_ur', 'train_label'])
+
+ return TrainDatasetResult(train_input, train_velocity, train_ur, train_label)
+
+
+def generate_dataset(data_dir, time_steps, dataset_list):
+ """According data_dir, time_steps and dataset_list to process and generate train_dataset, valid_dataset"""
+ train_data, train_velocity, train_ur, valid_data, valid_velocity, valid_ur = TrainDatasetSource \
+ (data_dir, dataset_list).train_data()
+
+ train_dataset = TrainDatasetMake(train_data, train_velocity, train_ur, time_steps, dataset_list)
+ train_dataset = ds.GeneratorDataset(train_dataset, ["inputs", "v", "ur", "labels"], shuffle=True)
+ train_dataset = train_dataset.batch(batch_size=16, drop_remainder=True)
+
+ valid_dataset = TrainDatasetMake(valid_data, valid_velocity, valid_ur, time_steps, dataset_list)
+ valid_dataset = ds.GeneratorDataset(valid_dataset, ["inputs", "v", "ur", "labels"], shuffle=False)
+ valid_dataset = valid_dataset.batch(batch_size=16, drop_remainder=True)
+
+ return train_dataset, valid_dataset
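+
+# Usage sketch for generate_dataset (the data path, time_steps and dataset_list values are placeholders):
+#     train_ds, valid_ds = generate_dataset("./dataset", time_steps=16, dataset_list=['5.0', '5.5'])
+#     for inputs, v, ur, labels in train_ds.create_tuple_iterator():
+#         ...  # inputs: (16, time_steps, 3, H, W); labels: (16, 3, H, W)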
+
+
+class TestDatasetMake:
+ """
+ Build the test dataset from dataset, velocity, ur and time_steps so that samples can be retrieved by index.
+ Parameters:
+ dataset: Train data and valid data
+ velocity: The speed of the moving structure
+ ur: Calculation conditions used as frequency
+ time_steps: The number of time steps to predict
+ """
+
+ def __init__(self, dataset, velocity, ur, time_steps):
+ self.dataset = dataset
+ self.velocity = velocity
+ self.ur = ur
+ self.time_steps = time_steps
+
+ def __len__(self):
+ return (len(self.dataset) - 2 * self.time_steps) // 2
+
+ def __getitem__(self, idx):
+ test_input = self.dataset[2 * idx:2 * idx + 2 * self.time_steps:2]
+ test_velocity = self.velocity[2 * idx + 2 * (self.time_steps - 1)]
+ test_ur = self.ur[2 * idx + 2 * (self.time_steps - 1)]
+ test_label = self.dataset[2 * idx + 2 * self.time_steps]
+
+ TestDatasetResult = namedtuple('TestDatasetResult',
+ ['test_input', 'test_velocity', 'test_ur', 'test_label'])
+
+ return TestDatasetResult(test_input, test_velocity, test_ur, test_label)
+
+
+def my_test_dataset(data_dir, time_steps, dataset_list):
+ """According data_dir, time_steps and time_steps to process and generate test_dataset"""
+ data_source = h5py.File(f"{data_dir}/ur{dataset_list[0]}/total_puv.mat")
+ data_sample = data_source['total_puv'][800:2000, :, :, :]
+ test_data = np.array(data_sample[:, :, :, 2:].transpose([0, 3, 1, 2]), np.float32)
+
+ surf_xy = data_sample[:, :, 0, 0:2]
+
+ data_source = h5py.File(f"{data_dir}/ur{dataset_list[0]}/velocity.mat")
+ data_sample = data_source['velocity'][800:2000]
+ test_velocity = np.array(data_sample, np.float32)
+
+ data_length = test_data.shape[0]
+
+ # normalize in the 0-10 range
+ ur = float(dataset_list[0])
+ ur_01 = ur / 10.0
+ test_ur = ur_01 * np.ones(data_length, dtype=np.float32)
+
+ test_dataset = TestDatasetMake(test_data, test_velocity, test_ur, time_steps)
+
+ return test_dataset, surf_xy
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/src/model.py b/MindFlow/applications/data_driven/fluid_structure_interaction/src/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..b31b843b5f628a90c1445e716a3edd406fe046c0
--- /dev/null
+++ b/MindFlow/applications/data_driven/fluid_structure_interaction/src/model.py
@@ -0,0 +1,152 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""hybrid deep neural network structure"""
+from mindspore import nn, ops
+
+from .conv_lstm import ConvLSTM
+
+
+class Encoder(nn.Cell):
+ """The Convolutional layer (12 layers) of Hybrid Deep Neural Network
+
+ Args:
+ in_channels (int): The number of channels in the input space.
+ num_layers (int): The number of Convolutional layer.
+ kernel_size(int): The size of Convolutional kernel in Convolutional layer.
+ has_bias(bool): Whether set bias for Convolutional layer.
+ weight_init(str): The way to perform weight initialization operation.
+ Input:
+ A tensor of size [B, C, H, W] = [16, 3, 192, 128]
+ Output:
+ A tensor of size [B, C, H, W] = [16, 128, 3, 2]
+ Example:
+ >> encoder = Encoder(in_channels=3, num_layers=12, kernel_size=4, has_bias=True, weight_init='XavierUniform')
+ """
+
+ def __init__(self, in_channels, num_layers, kernel_size, has_bias=True, weight_init='XavierUniform',
+ activation=nn.LeakyReLU()):
+ super(Encoder, self).__init__()
+
+ layers = []
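+ # layer 1 and subsequent odd-numbered layers downsample with stride 2 (doubling channels after the first);
+ # even-numbered layers refine features at stride 1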
+ for num in range(1, num_layers + 1):
+ if num == 1:
+ layers.extend([nn.Conv2d(in_channels, 2 ** (num + 1), kernel_size, stride=2, padding=0, pad_mode='same',
+ has_bias=has_bias, weight_init=weight_init, data_format='NCHW'), activation])
+ elif num % 2 == 0:
+ layers.extend([nn.Conv2d(int(2 ** (num / 2 + 1)), int(2 ** (num / 2 + 1)), kernel_size - 1, stride=1,
+ padding=0, pad_mode='same', has_bias=has_bias, weight_init=weight_init,
+ data_format='NCHW'), activation])
+ elif num % 2 == 1:
+ layers.extend([nn.Conv2d(int(2 ** ((num + 1) / 2)), int(2 ** ((num + 3) / 2)), kernel_size, stride=2,
+ padding=0, pad_mode='same', has_bias=has_bias, weight_init=weight_init,
+ data_format='NCHW'), activation])
+ self.convlayers = nn.SequentialCell(layers)
+
+ def construct(self, x):
+ x = self.convlayers(x)
+ return x
+
+
+class Decoder(nn.Cell):
+ """The DeConvolutional layer (12 layers) of Hybrid Deep Neural Network
+
+ Args:
+ in_channels (int): The number of channels in the input space.
+ num_layers (int): The number of DeConvolutional layer.
+ kernel_size(int): The size of DeConvolutional kernel in DeConvolutional layer.
+ weight_init(str): The way to perform weight initialization operation.
+ Input:
+ A tensor of size [B, C, H, W] = [16, 128, 3, 2]
+ Output:
+ A tensor of size [B, C, H, W] = [16, 3, 192, 128]
+
+ Example:
+ >> Decoder = Decoder(in_channels=128, num_layers=12, kernel_size=4, weight_init='XavierUniform')
+ """
+
+ def __init__(self, in_channels, num_layers, kernel_size, weight_init='XavierUniform', activation=nn.LeakyReLU()):
+ super(Decoder, self).__init__()
+
+ layers = []
+ for num in range(1, num_layers + 1):
+ if num == num_layers:
+ layers.extend(
+ [nn.Conv2d(in_channels, in_channels, kernel_size + 1, weight_init=weight_init, stride=1,
+ pad_mode='same', padding=0), activation])
+ elif num == num_layers - 1:
+ layers.extend([nn.Conv2dTranspose(in_channels + 1, in_channels, kernel_size, stride=2, pad_mode='same',
+ padding=0), activation])
+ elif num % 2 == 1:
+ layers.extend([nn.Conv2dTranspose(int(2 ** ((15 - num) / 2)), int(2 ** ((13 - num) / 2)), kernel_size,
+ stride=2, padding=0, pad_mode='same', weight_init=weight_init),
+ activation])
+ elif num % 2 == 0:
+ layers.extend([nn.Conv2d(int(2 ** ((14 - num) / 2)), int(2 ** ((14 - num) / 2)), kernel_size - 1,
+ stride=1, padding=0, pad_mode='same', weight_init=weight_init), activation])
+ self.deconv_layers = nn.SequentialCell(layers)
+
+ def construct(self, x):
+ x = self.deconv_layers(x)
+ return x
+
+
+class AEnet(nn.Cell):
+ r"""
+ A Hybrid Deep Neural Network Composed of Convolutional Layer, ConvLSTM, and Deconvolutional Layer
+
+ Args:
+ in_channels (int): The number of channels in the input space.
+ num_layers (int): The number of Convolutional and DeConvolutional layer.
+ kernel_size(int): The size of convolutional kernel in Convolutional and DeConvolutional layer.
+ num_convlstm_layers (int): The number of ConvLSTM Layer.
+
+ Inputs:
+ - **x** (Tensor) - Flow field history of shape :math:`(B, T, C, H, W)`.
+ - **velocity** (Tensor) - Structure velocity, one value per sample, used as the initial hidden state of the ConvLSTM.
+ - **ur** (Tensor) - Normalized working-condition parameter, one value per sample, used as the initial cell state of the ConvLSTM.
+
+ Outputs:
+ Tensor of shape :math:`(B, C, H, W)`, the predicted flow field at the next time step.
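+
+ Example (hyperparameter values are illustrative):
+ >> net = AEnet(in_channels=3, num_layers=12, kernel_size=4, num_convlstm_layers=1)
+ >> pred = net(x, velocity, ur)  # x: (B, T, 3, H, W) -> pred: (B, 3, H, W)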
+ """
+
+ def __init__(self,
+ in_channels,
+ num_layers,
+ kernel_size,
+ num_convlstm_layers):
+ super(AEnet, self).__init__()
+ self.encoder = Encoder(in_channels=in_channels, num_layers=num_layers, kernel_size=kernel_size)
+ self.convlstm = ConvLSTM(input_dim=128, hidden_dim=128, kernel_size=(3, 3), num_layers=num_convlstm_layers,
+ batch_first=True, bias=True)
+ self.decoder = Decoder(in_channels=in_channels, num_layers=num_layers, kernel_size=kernel_size)
+
+ def construct(self, x, velocity, ur):
+ """
+ Unpack the five-dimensional input x, reshape it, and feed it into the convolutional encoder; then reshape the
+ encoder output and send it, together with velocity and ur, to the ConvLSTM; finally feed the ConvLSTM output
+ into the deconvolutional decoder and return the result
+ """
+ b, t, c, h, w = x.shape
+
+ con_in = ops.reshape(x, (b * t, c, h, w))
+
+ con_out = self.encoder(con_in)
+
+ con_out = con_out.reshape(b, t, con_out.shape[1], con_out.shape[2], con_out.shape[3])
+
+ lstm_out = self.convlstm(con_out, velocity, ur)
+
+ out = self.decoder(lstm_out)
+
+ return out
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/src/utils.py b/MindFlow/applications/data_driven/fluid_structure_interaction/src/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..64200086c06a8dbf82d5f1b679f9b865771e60f9
--- /dev/null
+++ b/MindFlow/applications/data_driven/fluid_structure_interaction/src/utils.py
@@ -0,0 +1,27 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""draw the curve of the loss value changing with epoch, set the coordinate axis name, title and save the path"""
+import matplotlib.pyplot as plt
+
+
+def save_loss_curve(losses, xlabel, ylabel, title, save_path):
+ """draw and save curves of training loss and testing loss"""
+ plt.plot(losses)
+ plt.xlabel(xlabel)
+ plt.ylabel(ylabel)
+ plt.title(title)
+ plt.savefig(save_path)
+ plt.close()
diff --git a/MindFlow/applications/data_driven/fluid_structure_interaction/train.py b/MindFlow/applications/data_driven/fluid_structure_interaction/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b6e36eed06d7a2b2b7cb401ceeed0f53f6f0743
--- /dev/null
+++ b/MindFlow/applications/data_driven/fluid_structure_interaction/train.py
@@ -0,0 +1,185 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""train process"""
+import os
+import time
+import argparse
+import numpy as np
+
+from mindspore import nn, ops, context, save_checkpoint, set_seed, data_sink, jit
+
+from mindflow.utils import load_yaml_config
+
+from src import generate_dataset, AEnet, save_loss_curve
+
+np.random.seed(0)
+set_seed(0)
+
+
+def train():
+ """train process"""
+ # prepare params
+ config = load_yaml_config(args.config_file_path)
+ data_params = config["data"]
+ model_params = config["model"]
+ optimizer_params = config["optimizer"]
+
+ # prepare file to save the trained model files
+ ckpt_dir = optimizer_params["ckpt_dir"]
+ if not os.path.exists(ckpt_dir):
+ os.mkdir(ckpt_dir)
+
+ # prepare the model to be trained, as well as the loss function (MSE) and the optimizer (Adam)
+ model = AEnet(in_channels=model_params["in_channels"],
+ num_layers=model_params["num_layers"],
+ kernel_size=model_params["kernel_size"],
+ num_convlstm_layers=model_params["num_convlstm_layers"])
+
+ loss_func = nn.MSELoss()
+ optimizer = nn.Adam(params=model.trainable_params(), learning_rate=optimizer_params["lr"])
+
+ # when training on Ascend, introduce a dynamic loss scaler and automatic mixed precision training
+ if use_ascend:
+ from mindspore.amp import DynamicLossScaler, auto_mixed_precision, all_finite
+ loss_scaler = DynamicLossScaler(1024, 2, 100)
+ auto_mixed_precision(model, 'O1')
+ else:
+ loss_scaler = None
+
+ # define a forward propagation function
+ def forward_fn(inputs, velocity, ur, label):
+ pred = model(inputs, velocity, ur)
+ loss = loss_func(pred, label)
+
+ if use_ascend:
+ loss = loss_scaler.scale(loss)
+ return loss
+
+ # build a function that returns both the value and the gradients of forward_fn
+ grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=False)
+
+ # prepare dataset
+ print(f"==================Load data sample ===================")
+ dataset_train, dataset_eval = generate_dataset(data_params["data_dir"],
+ data_params["time_steps"],
+ args.data_list)
+ print(f"======================End Load========================\n")
+
+ print(f"====================Start train=======================")
+
+ # the training step is decorated with @jit so that it is compiled into a graph, which speeds up execution;
+ # it computes the loss and gradients via grad_fn and applies the optimizer update
+ @jit
+ def train_step(inputs, velocity, ur, label):
+ loss, grads = grad_fn(inputs, velocity, ur, label)
+ if use_ascend:
+ loss = loss_scaler.unscale(loss)
+ if all_finite(grads):
+ grads = loss_scaler.unscale(grads)
+ loss = ops.depend(loss, optimizer(grads))
+ return loss
+
+ @jit
+ def eval_step(inputs, velocity, ur, label):
+ loss = forward_fn(inputs, velocity, ur, label)
+ loss = ops.sqrt(loss)
+ return loss
+
+ # define train_sink_process and eval_sink_process, which fetch data from the dataset, preprocess it, and feed it
+ # into the training and evaluation steps
+ train_sink_process = data_sink(train_step, dataset_train, sink_size=1)
+ eval_sink_process = data_sink(eval_step, dataset_eval, sink_size=1)
+ train_data_size, eval_data_size = dataset_train.get_dataset_size(), dataset_eval.get_dataset_size()
+
+ # average training loss per epoch
+ avg_train_losses = []
+ # average validation loss per epoch
+ avg_valid_losses = []
+
+ # start epoch training
+ for epoch in range(1, optimizer_params["epochs"] + 1):
+ train_losses = 0
+ valid_losses = 0
+
+ local_time_beg = time.time()
+ model.set_train(True)
+
+ for _ in range(train_data_size):
+ step_train_loss = ops.squeeze(train_sink_process(), axis=())
+ step_train_loss = step_train_loss.asnumpy().item()
+ train_losses += step_train_loss
+
+ train_loss = train_losses / train_data_size
+ avg_train_losses.append(train_loss)
+
+ print(f"epoch: {epoch}, epoch average train loss: {train_loss :.6f}, "
+ f"epoch time: {(time.time() - local_time_beg):.2f}s")
+
+ if epoch % optimizer_params["eval_interval"] == 0:
+ print(f"=================Start Evaluation=====================")
+
+ eval_time_beg = time.time()
+ model.set_train(False)
+ for _ in range(eval_data_size):
+ step_eval_loss = ops.squeeze(eval_sink_process(), axis=())
+ step_eval_loss = step_eval_loss.asnumpy().item()
+ valid_losses += step_eval_loss
+
+ valid_loss = valid_losses / eval_data_size
+ avg_valid_losses.append(valid_loss)
+
+ print(f"epoch: {epoch}, epoch average valid loss: {valid_loss :.6f}, "
+ f"epoch time: {(time.time() - eval_time_beg):.2f}s")
+ print(f"==================End Evaluation======================")
+
+ # save the ckpt file of the trained model in the folder
+ if epoch % optimizer_params["save_ckpt_interval"] == 0:
+ save_checkpoint(model, f"{ckpt_dir}/net_{epoch}.ckpt")
+
+ # draw and save curves of training loss and testing loss
+ save_loss_curve(avg_train_losses, 'Epoch', 'avg_train_losses', 'Avg_train_losses Curve', 'Avg_train_losses.png')
+ save_loss_curve(avg_valid_losses, 'Epoch', 'avg_valid_losses', 'Avg_valid_losses Curve', 'Avg_valid_losses.png')
+
+ print(f"=====================End train========================")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="cylinder around flow ROM")
+
+ parser.add_argument("--config_file_path", type=str, default="./config.yaml")
+ parser.add_argument("--data_list", type=list, default=['5.0', '5.5', '6.0', '6.5'], help="The type for training")
+ parser.add_argument('--batch_size', type=int, default=32, help="batch size")
+ parser.add_argument("--mode", type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"],
+ help="Context mode, support 'GRAPH', 'PYNATIVE'")
+ parser.add_argument("--save_graphs", type=bool, default=False, choices=[True, False],
+ help="Whether to save intermediate compilation graphs")
+ parser.add_argument("--save_graphs_path", type=str, default="./summary")
+ parser.add_argument("--device_target", type=str, default="Ascend", choices=["GPU", "Ascend"],
+ help="The target device to run, support 'GPU','Ascend'")
+ parser.add_argument("--device_id", type=int, default=0, help="ID of the target device")
+
+ args = parser.parse_args()
+
+ context.set_context(mode=context.GRAPH_MODE if args.mode.upper().startswith("GRAPH") else context.PYNATIVE_MODE,
+ save_graphs=args.save_graphs, save_graphs_path=args.save_graphs_path,
+ device_target=args.device_target, device_id=args.device_id)
+ use_ascend = context.get_context(attr_key='device_target') == "Ascend"
+
+ print("Process ID:", os.getpid())
+ print(f"device id: {args.device_id}")
+ start_time = time.time()
+ train()
+ print(f"End-to-End total time: {(time.time() - start_time):.2f}s")
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/README.md b/MindFlow/applications/data_driven/move_boundary_hdnn/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..13be2cc58610f4506733f4461f29ad75be73f36b
--- /dev/null
+++ b/MindFlow/applications/data_driven/move_boundary_hdnn/README.md
@@ -0,0 +1,116 @@
+---
+
+# A Neural Network Based Method for Predicting Unsteady Flow Fields with Moving Boundaries
+
+## Overview
+
+### Background
+
+Simulating unsteady flow fields with moving boundaries to analyze the forces acting on moving structures can help
+optimize the design of such structures in engineering. High-precision computational fluid dynamics (CFD) can
+accurately simulate the flow field evolution and the structural loads, but high-precision moving-boundary problems
+require a large number of grid cells, which results in huge computing costs, and the construction of dynamic grids
+is extremely time-consuming. Deep neural networks can learn the evolutionary relationship between flow conditions
+and flow fields and quickly achieve high-precision flow field reconstruction. To solve the moving-boundary flow
+field reconstruction problem efficiently, a hybrid deep neural network (HDNN) is proposed to reconstruct the
+unsteady moving-boundary flow field and, based on it, to predict the flow field quickly. The relevant dimensions of
+the flow field are shown in the figure, where $Y = A\sin(2\pi ft)$ describes the simple harmonic motion of the
+cylinder in the vertical direction, A is the amplitude and f is the frequency; D is the cylinder diameter; the
+rectangular boundary represents the computational domain.
+
+
+
+
+
+### Method
+
+The moving-boundary deep neural network model (HDNN) in this work is built with the new-generation open-source deep
+learning framework MindSpore, whose model optimization, inference acceleration and other features make model
+construction and training efficient. The basic framework of HDNN follows previous work on a hybrid deep neural
+network framework [paper](https://doi.org/10.1016/j.ast.2022.107636). It is mainly composed of a convolutional
+neural network (CNN), a convolutional long short-term memory network (ConvLSTM) and a deconvolutional neural network
+(DeCNN): the CNN reduces the dimensionality of the time-series flow field and extracts features, the ConvLSTM learns
+the low-dimensional spatiotemporal features and makes predictions, and finally the DeCNN reconstructs the predicted
+flow fields.
+
++ Input layer: Input historical flow field;
++ Convolutional layer: By using multi-layer CNN to reduce the dimensionality of the input flow field and extract
+ high-dimensional spatiotemporal flow characteristics;
++ Memory layer: Learn the evolution of spatiotemporal characteristics of low dimensional spatial flow fields through
+ ConvLSTM and predict the next moment;
++ Deconvolution output layer: Restores the low-dimensional features of the predicted flow field to high-dimensional
+  space, reconstructs the transient flow field at the next moment through multi-layer DeCNN, and outputs visual
+  prediction results (a conceptual sketch of the overall data flow is given below)
+
+
+
+
+
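+The snippet below is a conceptual, simplified sketch of this three-stage data flow written with MindSpore `nn` cells.
+The class name `TinyHDNN`, the layer sizes and the plain-convolution memory layer are illustrative stand-ins only;
+the actual model in this repository uses the ConvLSTM-based layers defined in its `src` package.
+
+```python
+import mindspore.nn as nn
+import mindspore.ops as ops
+
+
+class TinyHDNN(nn.Cell):
+    """Illustrative three-stage HDNN data flow; not the repository implementation."""
+
+    def __init__(self):
+        super().__init__()
+        # CNN: compress each snapshot (3 channels: p, u, v) into a low-dimensional feature map
+        self.encoder = nn.SequentialCell([
+            nn.Conv2d(3, 16, 4, stride=2, pad_mode="same"), nn.LeakyReLU(),
+            nn.Conv2d(16, 32, 4, stride=2, pad_mode="same"), nn.LeakyReLU()])
+        # memory-layer stand-in: the real model uses a ConvLSTM conditioned on the
+        # structure's motion state to evolve the encoded features in time
+        self.memory = nn.Conv2d(32, 32, 3, pad_mode="same")
+        # DeCNN: reconstruct the next-step flow field from the evolved features
+        self.decoder = nn.SequentialCell([
+            nn.Conv2dTranspose(32, 16, 4, stride=2, pad_mode="same"), nn.LeakyReLU(),
+            nn.Conv2dTranspose(16, 3, 4, stride=2, pad_mode="same")])
+
+    def construct(self, x):
+        # x: (B, T, C, H, W) history of flow-field snapshots
+        b, t, c, h, w = x.shape
+        feat = self.encoder(ops.reshape(x, (b * t, c, h, w)))
+        feat = ops.reshape(feat, (b, t) + feat.shape[1:])[:, -1]  # latest encoded step
+        return self.decoder(self.memory(feat))  # predicted snapshot (B, 3, H, W)
+```
+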
+### Dataset
+
++ Dataset directory: The root directory of the moving-boundary dataset is forced_move, which has a total of 8
+  subdirectories named f0.90h0.i, with i being 00, 20, 25, 30, 35, 40, 45, and 50, representing different amplitude
+  sizes. The sub-directory (project) under each of them contains Matrix_01.mat, total_puv_project.mat and
+  velocity.mat, which are .mat files containing a 0/1 mask matrix, the pressure-velocity data, and the cylinder
+  motion velocity, respectively
+
++ Source: Compute the flow field through CFD simulation, specifically using ANSYS Fluent to obtain numerical simulation
+ flow field data of unsteady two-dimensional cylindrical flow
+
++ Establishment method: The physical quantities related to the flow field are: Reynolds number Re=200, cylinder diameter
+  D=0.02m, time step t=0.01s. The cylinder undergoes one-dimensional harmonic motion in the vertical direction, with
+  vibration frequencies (Hz) of 1.25, 1.43, 1.67, and 2.00, and amplitude ratios (A/D) of 0.5, 0.6, 0.7, and 0.8,
+  which are paired into a total of 16 motion states. The two-dimensional cylinder flow field is modeled physically,
+  the mesh is discretized/partitioned, and the governing equations are solved to obtain the flow field information.
+  The flow field quantities are non-dimensionalized and a 128 × 128 grid of sampling points is placed in the sampling
+  area to obtain the sample set for training and testing
+
++ Specification: Each flow field snapshot in the dataset contains three channels, representing the pressure distribution, horizontal velocity, and vertical velocity of the flow field (a short sketch for inspecting these files follows below)
+
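+The short snippet below shows one way to inspect these `.mat` files with `h5py`, the same reader used by the data
+pipeline in this pull request; the file path is a placeholder and the variable names stored inside each file are not
+asserted here, so the snippet only lists whatever it finds.
+
+```python
+import h5py
+
+# placeholder path: one amplitude subdirectory of the forced_move dataset
+with h5py.File("./forced_move/f0.90h0.20/project/total_puv_project.mat", "r") as f:
+    # print every stored variable name together with its shape (groups print None)
+    f.visititems(lambda name, obj: print(name, getattr(obj, "shape", None)))
+```
+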
+### Effect
+
+The trained neural network predicts the flow field under operating conditions with an amplitude of 0.45D. It only takes 17 seconds to obtain a flow field of 150 time steps using the trained neural network, while the CFD solver takes 240 seconds, demonstrating the efficiency of this method.
+
+## Quick Start
+
+### Training Method 1: Call the `train.py` script on the command line
+
+```shell
+python train.py --mode GRAPH --save_graphs False --save_graphs_path ./summary --device_target Ascend --device_id 0 --data_list ['0.00', '0.25', '0.35', '0.45'] --batch_size 16 --config_file_path ./config.yaml
+```
+
+Among them,
+`--mode` represents the running mode, 'GRAPH' indicates static graph mode and 'PYNATIVE' indicates dynamic graph mode, default 'GRAPH';
+
+`--save_graphs` represents whether to save the calculation graph, default 'False';
+
+`--save_graphs_path` represents the path where the calculation graph is saved, default './summary';
+
+`--device_target` represents the type of computing platform used, which can be selected as 'Ascend' or 'GPU', default 'Ascend';
+
+`--device_id` represents the calculation card number used, which can be filled in according to the actual situation, default 0;
+
+`--data_list` represents the dataset used for training, defaults ['0.00', '0.25', '0.35', '0.45'];
+
+`--batch_size` represents the number of images that will be sent to the network during each training, default 16;
+
+`--config_file_path` represents the parameter and path control file, default './config.yaml'
+
+### Training Method 2: Running Jupyter Notebook
+
+You can run training and validation code line by line using both the [Chinese version](move_boundary_hdnn_CN.ipynb) and the [English version](move_boundary_hdnn.ipynb) of Jupyter Notebook.
+
+## Results Display
+
+The following figure shows the results of a fully trained HDNN model for one-step prediction and one complete-cycle
+prediction of the unsteady moving-boundary flow with a vibration frequency of 1.43 Hz and an amplitude ratio of 0.8
+(an amplitude-ratio generalization state), displaying the pressure field, horizontal velocity field, and vertical velocity field.
+
+
+
+## Contributors
+
+gitee id:[DUJiaoxi](https://gitee.com/ddd000g)
+
+email: dujiaoxi@stu.xjtu.edu.cn
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/config.yaml b/MindFlow/applications/data_driven/move_boundary_hdnn/config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0d3038ce8175086ad40d334b1ca88a006210fb86
--- /dev/null
+++ b/MindFlow/applications/data_driven/move_boundary_hdnn/config.yaml
@@ -0,0 +1,22 @@
+data:
+ data_dir: "./forced_move" # dataset store direction
+ time_steps: 1 # previous time steps used to predict
+
+model:
+ in_channels: 3 # the number of channels in the input space
+ num_layers: 12 # the number of Convolutional and DeConvolutional layer
+ kernel_size: 4 # the size of kernel in Convolutional and DeConvolutional layer
+ num_convlstm_layers: 1 # the number of ConvLSTM layers
+
+optimizer:
+ lr: 0.001 # learning rate
+ epochs: 100 # the number of training epochs
+ eval_interval: 10 # time interval for model evaluation
+ save_ckpt_interval: 10 # save the model once for 10 epochs
+ ckpt_dir: "./ckpt_dir" # the directory where the model files are saved
+
+prediction:
+ data_dir: "./forced_move/f0.90h0.20" # prediction dataset store direction
+ ckpt_path: "./ckpt_dir/net_100.ckpt" # the path of ckpt file used in prediction process
+ prediction_result_dir: "./save_prediction" # the path for predicting results of a single step flow field
+ pred_continue_dir: "./save_prediction_continue" # directory for saving continuous (cyclic) prediction results
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/eval.py b/MindFlow/applications/data_driven/move_boundary_hdnn/eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ebf8788d6250532c8256569b23ec65efffbae45
--- /dev/null
+++ b/MindFlow/applications/data_driven/move_boundary_hdnn/eval.py
@@ -0,0 +1,120 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Prediction process"""
+import os
+import time
+import argparse
+import numpy as np
+from scipy.io import savemat
+
+from mindspore import nn, ops, load_checkpoint, load_param_into_net, set_seed
+from mindflow.utils import load_yaml_config
+
+from src import my_test_dataset, AEnet, save_loss_curve
+
+np.random.seed(0)
+set_seed(0)
+
+
+def prediction():
+ """Process of prediction with trained net"""
+ # prepare params
+ config = load_yaml_config(args.config_file_path)
+ data_params = config["data"]
+ model_params = config["model"]
+ prediction_params = config["prediction"]
+ prediction_result_dir = prediction_params["prediction_result_dir"]
+ pred_continue_dir = prediction_params["pred_continue_dir"]
+
+ # prepare network
+ net = AEnet(in_channels=model_params["in_channels"],
+ num_layers=model_params["num_layers"],
+ kernel_size=model_params["kernel_size"],
+ num_convlstm_layers=model_params["num_convlstm_layers"])
+ m_state_dict = load_checkpoint(prediction_params["ckpt_path"])
+ load_param_into_net(net, m_state_dict)
+
+ # prepare dataset
+ data_set = my_test_dataset(prediction_params["data_dir"], data_params["time_steps"])
+ if not os.path.exists(prediction_result_dir):
+ os.mkdir(prediction_result_dir)
+ if not os.path.exists(pred_continue_dir):
+ os.mkdir(pred_continue_dir)
+
+ # prepare loss function: MSE loss function
+ loss_func = nn.MSELoss()
+
+ # predicted loss
+ test_losses = []
+
+ # predicting next one-step flow field
+ if args.infer_mode == "one":
+ for i, (input_1, velocity, label, matrix_01) in enumerate(data_set):
+ pred = net(input_1, velocity)
+ pred = ops.mul(pred, matrix_01)
+ loss = ops.sqrt(loss_func(pred, label))
+ test_losses.append(loss)
+ print(f"test loss: {(loss.asnumpy().item()):.6f}")
+ savemat(f"{prediction_result_dir}/prediction_data{i}.mat", {'prediction': pred.asnumpy(),
+ 'real': label.asnumpy(),
+ 'input': input_1.asnumpy()})
+
+ # predicting a complete periodic flow field
+ elif args.infer_mode == "cycle":
+ for i, (inputvar, velocityvar, targetvar, matrix_01) in enumerate(data_set):
+ if i == 0:
+ inputs = inputvar
+ label = targetvar
+ velocity = velocityvar
+ pred = net(inputs, velocity)
+ pred = ops.mul(pred, matrix_01)
+ loss = ops.sqrt(loss_func(pred, label))
+ loss_aver = loss.asnumpy().item()
+
+ # record the test loss
+ test_losses.append(loss_aver)
+ print(f"test loss: {loss_aver:.6f}")
+ savemat(f"{pred_continue_dir}/prediction_data{i}.mat", {'prediction': pred.asnumpy(),
+ 'real': label.asnumpy(),
+ 'inputs': inputs.asnumpy()})
+ # Splicing predicted values as input for the next step
+ pred = ops.operations.ExpandDims()(pred, 1)
+ cat = ops.concat((inputs, pred), axis=1)
+ inputs = cat[:, 1:, :, :, :]
+
+ # draw and save curves of test losses
+ save_loss_curve(test_losses, 'Epoch', 'test_losses', 'Test_losses Curve', 'Test_losses.png')
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="cylinder around flow ROM")
+
+ parser.add_argument("--mode", type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"],
+ help="Context mode, support 'GRAPH', 'PYNATIVE'")
+ parser.add_argument("--device_target", type=str, default="GPU", choices=["GPU", "Ascend"],
+ help="The target device to run, support 'Ascend', 'GPU'")
+ parser.add_argument("--device_id", type=int, default=0, help="ID of the target device")
+ parser.add_argument("--config_file_path", type=str, default="./config.yaml")
+ parser.add_argument("--infer_mode", type=str, default="one", choices=["one", "cycle"],
+ help="The mode to predict next one-step flow field or a complete periodic flow field")
+
+ args = parser.parse_args()
+
+ print("Process ID:", os.getpid())
+ print(f"device id: {args.device_id}")
+ start_time = time.time()
+ prediction()
+ print(f"End-to-End total time: {(time.time() - start_time):.2f}s")
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/images/HDNN.jpg b/MindFlow/applications/data_driven/move_boundary_hdnn/images/HDNN.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..53f5a41c8af66915de1c1063a69eef297779874e
Binary files /dev/null and b/MindFlow/applications/data_driven/move_boundary_hdnn/images/HDNN.jpg differ
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/images/p1.png b/MindFlow/applications/data_driven/move_boundary_hdnn/images/p1.png
new file mode 100644
index 0000000000000000000000000000000000000000..9b023470d38438bc25142e2a5f2d101cbe872882
Binary files /dev/null and b/MindFlow/applications/data_driven/move_boundary_hdnn/images/p1.png differ
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/images/pred_cycle_puv.jpg b/MindFlow/applications/data_driven/move_boundary_hdnn/images/pred_cycle_puv.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..24141de68759fe2eaf6bd8f7c69edefe2313758c
Binary files /dev/null and b/MindFlow/applications/data_driven/move_boundary_hdnn/images/pred_cycle_puv.jpg differ
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/images/pred_single_step_puv.jpg b/MindFlow/applications/data_driven/move_boundary_hdnn/images/pred_single_step_puv.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..6ce9f71f826b01d9cc8b0b83e84a10c0c80d995c
Binary files /dev/null and b/MindFlow/applications/data_driven/move_boundary_hdnn/images/pred_single_step_puv.jpg differ
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/move_boundary_hdnn.ipynb b/MindFlow/applications/data_driven/move_boundary_hdnn/move_boundary_hdnn.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..c9b2e8f29c6755b36dd8734b5f19eee297bbb0f9
--- /dev/null
+++ b/MindFlow/applications/data_driven/move_boundary_hdnn/move_boundary_hdnn.ipynb
@@ -0,0 +1,480 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "fda19887",
+ "metadata": {},
+ "source": [
+ "\n",
+ "# Prediction of Unsteady Flow Field with Moving Boundary\n",
+ "\n",
+ "## Overview\n",
+ "\n",
+ "As an important tool to simulate and analyze fluid motion through numerical methods, CFD greatly facilitates the scientific research of fluid mechanics related issues, and plays an important role in providing accurate data and insights in the fields of design, optimization and research. One of the representative and research value problems in fluid mechanics is to simulate the unsteady flow field system with moving boundary to analyze the force of the moving structure in the flow field, which can optimize the design of the moving structure in engineering, and provide scheme strategies for the shape optimization of aerospace vehicles and navigation vehicles. High precision computational fluid dynamics (CFD) can accurately simulate the evolution of flow field and the stress of structure, but the high-precision dynamic boundary problem requires a large number of grids, which leads to huge hardware consumption and computational time cost. In addition, the construction of dynamic grids is also particularly time-consuming.\n",
+ "\n",
+ "When CFD is applied to complex problems, the amount of calculation is huge and the calculation accuracy needs to be improved. An effective solution is given in the field of intelligent fluid mechanics. Deep learning can learn the evolution relationship between flow conditions and flow field through deep neural network, and quickly realize high-precision prediction and reconstruction of flow field. In order to efficiently solve the problem of reconstructing the flow field at the moving boundary, a hybrid depth neural network (hdnn) is proposed to reconstruct the unsteady flow field at the moving boundary, and fast predict the flow field based on it.\n",
+ "\n",
+ "## Problem description\n",
+ "\n",
+ "The relevant dimensions of the flow field are shown in the figure, where $Y = Asin(2πft)$ represents the motion expression of the cylinder in a simple harmonic motion in the vertical direction, a is the amplitude, f is the frequency; D stands for cylinder diameter; The rectangular boundary represents the computational domain. When the uniform incoming flow flows through a moving cylinder, under the influence of the interaction between the fluid and the solid, a series of complex flow phenomena will be formed behind the cylinder, such as boundary layer separation, alternating Karman vortex street and so on, and evolve into a non-uniform flow field whose physical quantities change periodically with time.\n",
+ "\n",
+ "## Technology path\n",
+ "\n",
+ "The specific process of mindflow to solve this problem is as follows:\n",
+ "\n",
+ "1.Create data sets based on CFD numerical simulation results.\n",
+ "\n",
+ "2.The model is built using mindspire deep learning framework.\n",
+ "\n",
+ "3.Define the optimizer and loss function.\n",
+ "\n",
+ "4.Use mindspire's instant compilation to accelerate model training.\n",
+ "\n",
+ "5.Use the trained model for reasoning and visualization."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "14af9033",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "39552eaf",
+ "metadata": {},
+ "source": [
+ "## Model Architecture\n",
+ "\n",
+ "The basic framework of HDNN consists of convolutional neural network (CNN), convolutional long short-term memory network (ConvLSTM) and deconvolution neural network (DeCNN). CNN reduces the dimensionality of the time series flow field and achieves feature extraction; ConvLSTM learns low dimensional spatiotemporal features and makes predictions; Finally, DeCNN achieves reconstruction of predicted flow fields\n",
+ "\n",
+ "+ Input layer: Input historical flow field\n",
+ "+ Convolutional layer: Using multi-layer CNN to reduce the dimensionality of the input flow field and extract high-dimensional spatiotemporal flow characteristics\n",
+ "+ Memory layer: learning the evolution of spatiotemporal characteristics of low dimensional spatial flow fields through ConvLSTM and predicting the next moment\n",
+ "+ Deconvolution output layer: Restores the low-dimensional features of the predicted flow field to high-dimensional space, reconstructs the transient flow field at the next moment through multi-layer DeCNN, and outputs visual prediction results"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "901d5c41",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7f317de2",
+ "metadata": {},
+ "source": [
+ "## Training dataset\n",
+ "\n",
+ "The dataset is built from multi-dimensional flow-field snapshot matrices constructed from numerical simulations of the unsteady two-dimensional cylinder flow\n",
+ "\n",
+ "+ A two-dimensional cylinder undergoes one-dimensional harmonic vibration in a uniform incoming flow, with vibration frequencies f (Hz) of 1.25, 1.43, 1.67, and 2.00 and amplitude ratios A/D of 0.5, 0.6, 0.7, and 0.8; pairwise combination gives a total of 16 motion states\n",
+ "+ The dataset is a series of unsteady flow field data in a certain state (f, A/D)\n",
+ "+ Each flow field snapshot contains three channels, representing the pressure distribution, horizontal velocity, and vertical velocity of the flow field. The snapshot matrix has size T × C × H × W (C is the number of channels; H and W are the height and width of each snapshot)\n",
+ "+ Dataset: [Download location](https://download.mindspore.cn/mindscience/mindflow/dataset/applications/data_driven/move_boundary_hdnn)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "f2847f63",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import time\n",
+ "import argparse\n",
+ "import numpy as np\n",
+ "\n",
+ "from mindspore import nn, ops, context, save_checkpoint, set_seed, data_sink, jit\n",
+ "from mindflow.utils import load_yaml_config\n",
+ "\n",
+ "from src import my_train_dataset, AEnet, save_loss_curve"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4e7406dd",
+ "metadata": {},
+ "source": [
+ "## Training environment\n",
+ "\n",
+ "+ Training uses the static graph mode (GRAPH) of the MindSpore framework\n",
+ "+ Train on CPU, GPU, or Ascend (single card)\n",
+ "+ The cylinder vibration frequencies f (Hz) in the training dataset are 1.25, 1.43, and 1.67, and the amplitude ratios A/D are 0.5, 0.6, and 0.7; pairwise combination gives a total of 9 motion states"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "f5c6d767",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "set_seed(0)\n",
+ "np.random.seed(0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bbd5ca2c",
+ "metadata": {},
+ "source": [
+ "## Training hyperparameter\n",
+ "\n",
+ "Obtain hyperparameters for models, data, and optimizers from config"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "7e3ba84a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "parser = argparse.ArgumentParser(description=\"cylinder around flow ROM\")\n",
+ "\n",
+ "parser.add_argument(\"--mode\", type=str, default=\"GRAPH\", choices=[\"GRAPH\", \"PYNATIVE\"],\n",
+ " help=\"Context mode, support 'GRAPH', 'PYNATIVE'\")\n",
+ "parser.add_argument(\"--save_graphs\", type=bool, default=False, choices=[True, False],\n",
+ " help=\"Whether to save intermediate compilation graphs\")\n",
+ "parser.add_argument(\"--save_graphs_path\", type=str, default=\"./summary\")\n",
+ "parser.add_argument(\"--device_target\", type=str, default=\"GPU\", choices=[\"GPU\", \"Ascend\"],\n",
+ " help=\"The target device to run, support 'GPU','Ascend'\")\n",
+ "parser.add_argument(\"--device_id\", type=int, default=0, help=\"ID of the target device\")\n",
+ "parser.add_argument(\"--data_list\", type=list, default=['0.00', '0.25', '0.35', '0.45'],\n",
+ " help=\"The type for training, [0.00, 0.25, 0.35, 0.45] for multi_state training /n\"\n",
+ " \"[0.25],....,[0.45] for single_state training\")\n",
+ "parser.add_argument('--batch_size', type=int, default=16, help=\"mini batch_size\")\n",
+ "parser.add_argument(\"--config_file_path\", type=str, default=\"./config.yaml\")\n",
+ "\n",
+ "args = parser.parse_args()\n",
+ "\n",
+ "context.set_context(mode=context.GRAPH_MODE if args.mode.upper().startswith(\"GRAPH\") else context.PYNATIVE_MODE,\n",
+ " save_graphs=args.save_graphs, save_graphs_path=args.save_graphs_path,\n",
+ " device_target=args.device_target, device_id=args.device_id)\n",
+ "use_ascend = context.get_context(attr_key='device_target') == \"Ascend\"\n",
+ "\n",
+ "config = load_yaml_config(args.config_file_path)\n",
+ "data_params = config[\"data\"]\n",
+ "model_params = config[\"model\"]\n",
+ "optimizer_params = config[\"optimizer\"]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8e53d5ec",
+ "metadata": {},
+ "source": [
+ "## Training process file save path\n",
+ "\n",
+ "Save the trained model checkpoint to a folder every fixed number of training epochs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "aa53aed1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ckpt_dir = optimizer_params[\"ckpt_dir\"]\n",
+ "if not os.path.exists(ckpt_dir):\n",
+ " os.mkdir(ckpt_dir)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "505908fc",
+ "metadata": {},
+ "source": [
+ "## Constructing neural network and optimizer\n",
+ "\n",
+ "The network has 12 convolutional layers, 1 ConvLSTM layer, and 12 deconvolutional layers\n",
+ "\n",
+ "The loss function is the mean squared error (MSE), and the optimizer is Adam (Adaptive Moment Estimation)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "37e0f61b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = AEnet(in_channels=model_params[\"in_channels\"],\n",
+ " num_layers=model_params[\"num_layers\"],\n",
+ " kernel_size=model_params[\"kernel_size\"],\n",
+ " num_convlstm_layers=model_params[\"num_convlstm_layers\"])\n",
+ "\n",
+ "loss_func = nn.MSELoss()\n",
+ "optimizer = nn.Adam(params=model.trainable_params(), learning_rate=optimizer_params[\"lr\"])\n",
+ "if use_ascend:\n",
+ " from mindspore.amp import DynamicLossScaler, auto_mixed_precision, all_finite\n",
+ " loss_scaler = DynamicLossScaler(1024, 2, 100)\n",
+ " auto_mixed_precision(model, 'O1')\n",
+ "else:\n",
+ " loss_scaler = None"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "89d32ff9",
+ "metadata": {},
+ "source": [
+ "## Training framework\n",
+ "\n",
+ "Define the forward propagation function forward_fn, which compares the prediction with the ground truth to obtain the loss value and returns it"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "7e34bd79",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def forward_fn(inputs, velocity, label):\n",
+ " pred = model(inputs, velocity)\n",
+ " loss = loss_func(pred, label)\n",
+ "\n",
+ " if use_ascend:\n",
+ " loss = loss_scaler.scale(loss)\n",
+ " return loss\n",
+ "\n",
+ "grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "faacf783",
+ "metadata": {},
+ "source": [
+ "## Dataset loading\n",
+ "\n",
+ "Pass the parameters to my_train_dataset to obtain the training and validation datasets"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "dbe1356d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(f\"==================Load data sample ===================\")\n",
+ "dataset_train, dataset_eval = my_train_dataset(data_params[\"data_dir\"],\n",
+ " data_params[\"time_steps\"],\n",
+ " args.data_list)\n",
+ "print(f\"======================End Load========================\\n\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9da7331a",
+ "metadata": {},
+ "source": [
+ "## Data sink and model training\n",
+ "\n",
+ "Define train_step and eval_step and use data_sink to accelerate training; print the loss value and elapsed time during training, and save the model checkpoint every fixed number of epochs"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "86c63294",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(f\"====================Start train=======================\")\n",
+ "@jit\n",
+ "def train_step(inputs, velocity, label):\n",
+ " loss, grads = grad_fn(inputs, velocity, label)\n",
+ " if use_ascend:\n",
+ " loss = loss_scaler.unscale(loss)\n",
+ " if all_finite(grads):\n",
+ " grads = loss_scaler.unscale(grads)\n",
+ " loss = ops.depend(loss, optimizer(grads))\n",
+ " return loss\n",
+ "\n",
+ "@jit\n",
+ "def eval_step(inputs, velocity, label):\n",
+ " loss = forward_fn(inputs, velocity, label)\n",
+ " loss = ops.sqrt(loss)\n",
+ " return loss\n",
+ "\n",
+ "train_sink_process = data_sink(train_step, dataset_train, sink_size=1)\n",
+ "eval_sink_process = data_sink(eval_step, dataset_eval, sink_size=1)\n",
+ "train_data_size, eval_data_size = dataset_train.get_dataset_size(), dataset_eval.get_dataset_size()\n",
+ "\n",
+ "avg_train_losses = []\n",
+ "avg_valid_losses = []\n",
+ "\n",
+ "for epoch in range(1, optimizer_params[\"epochs\"] + 1):\n",
+ " train_losses = 0\n",
+ " valid_losses = 0\n",
+ "\n",
+ " local_time_beg = time.time()\n",
+ " model.set_train(True)\n",
+ "\n",
+ " for _ in range(train_data_size):\n",
+ " step_train_loss = ops.squeeze(train_sink_process(), axis=())\n",
+ " step_train_loss = step_train_loss.asnumpy().item()\n",
+ " train_losses += step_train_loss\n",
+ "\n",
+ " train_loss = train_losses / train_data_size\n",
+ " avg_train_losses.append(train_loss)\n",
+ "\n",
+ " print(f\"epoch: {epoch}, epoch average train loss: {train_loss :.6f}, \"\n",
+ " f\"epoch time: {(time.time() - local_time_beg):.2f}s\")\n",
+ "\n",
+ " if epoch % optimizer_params[\"eval_interval\"] == 0:\n",
+ " print(f\"=================Start Evaluation=====================\")\n",
+ "\n",
+ " eval_time_beg = time.time()\n",
+ " model.set_train(False)\n",
+ " for _ in range(eval_data_size):\n",
+ " step_eval_loss = ops.squeeze(eval_sink_process(), axis=())\n",
+ " step_eval_loss = step_eval_loss.asnumpy().item()\n",
+ " valid_losses += step_eval_loss\n",
+ "\n",
+ " valid_loss = valid_losses / eval_data_size\n",
+ " avg_valid_losses.append(valid_loss)\n",
+ "\n",
+ " print(f\"epoch: {epoch}, epoch average valid loss: {valid_loss :.6f}, \"\n",
+ " f\"epoch time: {(time.time() - eval_time_beg):.2f}s\")\n",
+ " print(f\"==================End Evaluation======================\")\n",
+ "\n",
+ " if epoch % optimizer_params[\"save_ckpt_interval\"] == 0:\n",
+ " save_checkpoint(model, f\"{ckpt_dir}/net_{epoch}.ckpt\")\n",
+ "\n",
+ "save_loss_curve(avg_train_losses, 'Epoch', 'avg_train_losses', 'Avg_train_losses Curve', 'Avg_train_losses.png')\n",
+ "save_loss_curve(avg_valid_losses, 'Epoch', 'avg_valid_losses', 'Avg_valid_losses Curve', 'Avg_valid_losses.png')\n",
+ "\n",
+ "print(f\"=====================End train========================\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b1262b22",
+ "metadata": {},
+ "source": [
+ "## Set training conditions and pass parameters\n",
+ "\n",
+ "When the file is run, the necessary parameters are passed in through the argument parser to start training; the process ID, device ID, and total end-to-end training time are printed"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "505f3e5b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if __name__ == \"__main__\":\n",
+ " print(\"Process ID:\", os.getpid())\n",
+ " print(f\"device id: {args.device_id}\")\n",
+ " start_time = time.time()\n",
+ " train()\n",
+ " print(f\"End-to-End total time: {(time.time() - start_time):.2f}s\")"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "4bcfdbdd",
+ "metadata": {},
+ "source": [
+ "Process ID: 2801010\n",
+ "device id: 0\n",
+ "==================Load data sample ===================\n",
+ "======================End Load========================\n",
+ "\n",
+ "====================Start train=======================\n",
+ "epoch: 1, epoch average train loss: 0.069304, epoch time: 51.62s\n",
+ "epoch: 2, epoch average train loss: 0.011798, epoch time: 24.36s\n",
+ "epoch: 3, epoch average train loss: 0.010980, epoch time: 16.55s\n",
+ "epoch: 4, epoch average train loss: 0.010644, epoch time: 24.14s\n",
+ "epoch: 5, epoch average train loss: 0.010608, epoch time: 22.38s\n",
+ "epoch: 6, epoch average train loss: 0.010324, epoch time: 21.66s\n",
+ "epoch: 7, epoch average train loss: 0.010152, epoch time: 32.79s\n",
+ "epoch: 8, epoch average train loss: 0.009601, epoch time: 24.62s\n",
+ "epoch: 9, epoch average train loss: 0.009147, epoch time: 22.19s\n",
+ "epoch: 10, epoch average train loss: 0.008809, epoch time: 19.52s\n",
+ "=================Start Evaluation=====================\n",
+ "epoch: 10, epoch average valid loss: 0.098904, epoch time: 12.86s\n",
+ "==================End Evaluation======================\n",
+ "\n",
+ "...\n",
+ "\n",
+ "epoch: 91, epoch average train loss: 0.000274, epoch time: 28.49s\n",
+ "epoch: 92, epoch average train loss: 0.000280, epoch time: 27.60s\n",
+ "epoch: 93, epoch average train loss: 0.000231, epoch time: 20.99s\n",
+ "epoch: 94, epoch average train loss: 0.000297, epoch time: 18.26s\n",
+ "epoch: 95, epoch average train loss: 0.000417, epoch time: 21.94s\n",
+ "epoch: 96, epoch average train loss: 0.000228, epoch time: 27.41s\n",
+ "epoch: 97, epoch average train loss: 0.000232, epoch time: 18.61s\n",
+ "epoch: 98, epoch average train loss: 0.000250, epoch time: 26.81s\n",
+ "epoch: 99, epoch average train loss: 0.000217, epoch time: 21.16s\n",
+ "epoch: 100, epoch average train loss: 0.000244, epoch time: 18.09s\n",
+ "=================Start Evaluation=====================\n",
+ "epoch: 100, epoch average valid loss: 0.015813, epoch time: 15.06s\n",
+ "==================End Evaluation======================\n",
+ "=====================End train========================\n",
+ "End-to-End total time: 2575.05s"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "25aac646",
+ "metadata": {},
+ "source": [
+ "## Visualization of predicted flow field results\n",
+ "\n",
+ "+ Moving-boundary flow field prediction is started by executing eval.py and supports two modes: single-step prediction (infer_mode set to \"one\") and continuous prediction within one vibration period (infer_mode set to \"cycle\"). Single-step prediction only predicts the flow field one time step ahead, while continuous prediction successively predicts the flow field over a complete cycle\n",
+ "+ The figure below shows the results of a fully trained HDNN model for single-step prediction and full-cycle prediction of the unsteady moving boundary at a vibration frequency of 1.43 Hz and an amplitude ratio of 0.8 (an amplitude-ratio generalization state), showing the pressure field, horizontal velocity field, and vertical velocity field"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "a76ca937",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b0462a4d",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.16"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/move_boundary_hdnn_CN.ipynb b/MindFlow/applications/data_driven/move_boundary_hdnn/move_boundary_hdnn_CN.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..213859ffd2e195a12f851e52627cd1382e00e9c8
--- /dev/null
+++ b/MindFlow/applications/data_driven/move_boundary_hdnn/move_boundary_hdnn_CN.ipynb
@@ -0,0 +1,480 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "fda19887",
+ "metadata": {},
+ "source": [
+ "\n",
+ "# 运动边界非定常流场预测\n",
+ "\n",
+ "## 概述\n",
+ "\n",
+ "CFD作为一种通过数值方法来模拟和解析流体运动的重要工具,极大便利了流体力学相关问题的科学研究,在设计、优化和研究领域提供准确的数据和见解并发挥着重要作用。流体力学中具有代表性和研究价值的一类问题是:对具有移动边界的非定常流场系统进行模拟,以分析运动结构在流场中的受力情况,可在工程上优化设计运动结构,为航空航天飞行器以及航海器等外形优化提供方案策略。高精确度计算流体力学技术(CFD)能够准确模拟流场演化和结构受力情况,但是高精度动边界问题需要大量网格,导致硬件消耗和计算时间成本巨大,另外对动态网格的构造也格外耗时。\n",
+ "\n",
+ "面对CFD在应用于复杂问题时计算量巨大并且计算精度有待提高等问题,智能流体力学领域给出了行之有效的解决方案,深度学习可以通过深度神经网络可学习流动工况与流场之间的演化关系,快速实现流场高精度预测与重构。为了高效解决动边界流场重构问题,提出了一种混合深度神经网络(HDNN),以实现非定常动边界流场重构,并基于此实现流场快速预测。\n",
+ "\n",
+ "## 问题描述\n",
+ "\n",
+ "流场相关尺寸如图所示,其中 $Y = Asin(2πft)$ 代表圆柱体在竖直方向做简谐运动的运动表达式,A为振幅,f为频率;D代表圆柱体直径;矩形边界代表计算域。均匀来流流过运动圆柱体时,在流体与固体相互作用的影响下,会在圆柱体后方形成一系列复杂的流动现象,如边界层分离、交替出现的卡门涡街等,并演化为物理量随时间周期性变化的非均匀流场。\n",
+ "\n",
+ "## 技术路径\n",
+ "\n",
+ "MindFlow求解该问题的具体流程如下:\n",
+ "\n",
+ "1.根据CFD数值模拟结果创建数据集。\n",
+ "\n",
+ "2.使用MindSpore深度学习框架构建模型。\n",
+ "\n",
+ "3.定义优化器与损失函数。\n",
+ "\n",
+ "4.使用MindSpore的即时编译等加速模型训练。\n",
+ "\n",
+ "5.利用训练好的模型进行推理和可视化。"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "14af9033",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "39552eaf",
+ "metadata": {},
+ "source": [
+ "## 模型架构\n",
+ "\n",
+ "HDNN的基本框架由卷积神经网络(CNN)、卷积长短期记忆网络(ConvLSTM)和反卷积神经网络(DeCNN)组成。CNN降低了时间序列流场的维数,实现特征提取;ConvLSTM学习低维时空特征并进行预测;最后,DeCNN实现预测流场的重建\n",
+ "\n",
+ "+ 输入层:输入历史流场\n",
+ "+ 卷积层:通过多层CNN对输入流场进行降维,提取高维时空流动特征\n",
+ "+ 记忆层:通过ConvLSTM学习低维空间流场时空特征的演变,预测下一时刻\n",
+ "+ 反卷积输出层:将预测流场的低维特征恢复到高维空间,通过多层DeCNN重构下一时刻的瞬态流场,并输出可视化预测结果"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "901d5c41",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7f317de2",
+ "metadata": {},
+ "source": [
+ "## 训练数据集\n",
+ "\n",
+ "数据集由非定常二维圆柱绕流的数值仿真流场数据构建的多维矩阵流场快照矩阵构建而成\n",
+ "\n",
+ "+ 二维圆柱在均匀来流流场中做一维简谐振动,振动频率f(Hz)分别为1.25、1.43、1.67、2.00,振幅比A/D分别为0.5、0.6、0.7、0.8。两两组合总共16组运动状态\n",
+ "+ 数据集为某一状态(f,A/D)下的非定常流场序列数据\n",
+ "+ 每张流场快照包含3个通道,代表流场的压强分布信息、水平速度信息、竖直速度信息,多维矩阵流场快照矩阵尺寸为:T×C×H×W(C为通道数,H,W分别为快照的高和宽)\n",
+ "+ 数据集:[下载位置](https://download.mindspore.cn/mindscience/mindflow/dataset/applications/data_driven/move_boundary_hdnn)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "f2847f63",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import time\n",
+ "import argparse\n",
+ "import numpy as np\n",
+ "\n",
+ "from mindspore import nn, ops, context, save_checkpoint, set_seed, data_sink, jit\n",
+ "from mindflow.utils import load_yaml_config\n",
+ "\n",
+ "from src import my_train_dataset, AEnet, save_loss_curve"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4e7406dd",
+ "metadata": {},
+ "source": [
+ "## 训练环境\n",
+ "\n",
+ "+ 训练采用MindSpore框架的静态图模式(GRAPH)\n",
+ "+ 在CPU、GPU或Ascend进行训练(单卡)\n",
+ "+ 训练数据集中的圆柱振动频率f(Hz)分别为1.25、1.43、1.67,振幅比A/D分别为0.5、0.6、0.7。两两组合总共9组运动状态"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "f5c6d767",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "set_seed(0)\n",
+ "np.random.seed(0)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bbd5ca2c",
+ "metadata": {},
+ "source": [
+ "## 训练超参数\n",
+ "\n",
+ "从config中获得模型、数据、优化器的超参"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "7e3ba84a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "parser = argparse.ArgumentParser(description=\"cylinder around flow ROM\")\n",
+ "\n",
+ "parser.add_argument(\"--mode\", type=str, default=\"GRAPH\", choices=[\"GRAPH\", \"PYNATIVE\"],\n",
+ " help=\"Context mode, support 'GRAPH', 'PYNATIVE'\")\n",
+ "parser.add_argument(\"--save_graphs\", type=bool, default=False, choices=[True, False],\n",
+ " help=\"Whether to save intermediate compilation graphs\")\n",
+ "parser.add_argument(\"--save_graphs_path\", type=str, default=\"./summary\")\n",
+ "parser.add_argument(\"--device_target\", type=str, default=\"GPU\", choices=[\"GPU\", \"Ascend\"],\n",
+ " help=\"The target device to run, support 'GPU','Ascend'\")\n",
+ "parser.add_argument(\"--device_id\", type=int, default=0, help=\"ID of the target device\")\n",
+ "parser.add_argument(\"--data_list\", type=list, default=['0.00', '0.25', '0.35', '0.45'],\n",
+ " help=\"The type for training, [0.00, 0.25, 0.35, 0.45] for multi_state training /n\"\n",
+ " \"[0.25],....,[0.45] for single_state training\")\n",
+ "parser.add_argument('--batch_size', type=int, default=16, help=\"mini batch_size\")\n",
+ "parser.add_argument(\"--config_file_path\", type=str, default=\"./config.yaml\")\n",
+ "\n",
+ "args = parser.parse_args()\n",
+ "\n",
+ "context.set_context(mode=context.GRAPH_MODE if args.mode.upper().startswith(\"GRAPH\") else context.PYNATIVE_MODE,\n",
+ " save_graphs=args.save_graphs, save_graphs_path=args.save_graphs_path,\n",
+ " device_target=args.device_target, device_id=args.device_id)\n",
+ "use_ascend = context.get_context(attr_key='device_target') == \"Ascend\"\n",
+ "\n",
+ "config = load_yaml_config(args.config_file_path)\n",
+ "data_params = config[\"data\"]\n",
+ "model_params = config[\"model\"]\n",
+ "optimizer_params = config[\"optimizer\"]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8e53d5ec",
+ "metadata": {},
+ "source": [
+ "## 训练过程文件保存路径\n",
+ "\n",
+ "将训练好的模型文件每隔一定训练次数保存在文件夹下"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "aa53aed1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "ckpt_dir = optimizer_params[\"ckpt_dir\"]\n",
+ "if not os.path.exists(ckpt_dir):\n",
+ " os.mkdir(ckpt_dir)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "505908fc",
+ "metadata": {},
+ "source": [
+ "## 构建神经网络及优化器\n",
+ "\n",
+ "神经网络的卷积层共有12层,ConvLSTM有1层,反卷积共有12层\n",
+ "\n",
+ "损失函数使用均方误差(Mean Squared Error)损失函数,优化器使用Adam(Adaptive Moment Estimation)优化算法"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "37e0f61b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = AEnet(in_channels=model_params[\"in_channels\"],\n",
+ " num_layers=model_params[\"num_layers\"],\n",
+ " kernel_size=model_params[\"kernel_size\"],\n",
+ " num_convlstm_layers=model_params[\"num_convlstm_layers\"])\n",
+ "\n",
+ "loss_func = nn.MSELoss()\n",
+ "optimizer = nn.Adam(params=model.trainable_params(), learning_rate=optimizer_params[\"lr\"])\n",
+ "if use_ascend:\n",
+ " from mindspore.amp import DynamicLossScaler, auto_mixed_precision, all_finite\n",
+ " loss_scaler = DynamicLossScaler(1024, 2, 100)\n",
+ " auto_mixed_precision(model, 'O1')\n",
+ "else:\n",
+ " loss_scaler = None"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "89d32ff9",
+ "metadata": {},
+ "source": [
+ "## 训练框架\n",
+ "\n",
+ "定义前向传播函数forward_fn,将预测值和真值比较得到损失值loss并返回"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "7e34bd79",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def forward_fn(inputs, velocity, label):\n",
+ " pred = model(inputs, velocity)\n",
+ " loss = loss_func(pred, label)\n",
+ "\n",
+ " if use_ascend:\n",
+ " loss = loss_scaler.scale(loss)\n",
+ " return loss\n",
+ "\n",
+ "grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=False)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "faacf783",
+ "metadata": {},
+ "source": [
+ "## 数据集加载\n",
+ "\n",
+ "给my_train_dataset传参,得到训练数据集和验证数据集"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "dbe1356d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(f\"==================Load data sample ===================\")\n",
+ "dataset_train, dataset_eval = my_train_dataset(data_params[\"data_dir\"],\n",
+ " data_params[\"time_steps\"],\n",
+ " args.data_list)\n",
+ "print(f\"======================End Load========================\\n\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9da7331a",
+ "metadata": {},
+ "source": [
+ "## 数据下沉及模型训练\n",
+ "\n",
+ "定义train_step和eval_step并使用data_sink加速训练,输出训练过程的损失值和使用时间,并每隔一定训练轮次保存模型文件"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "86c63294",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print(f\"====================Start train=======================\")\n",
+ "@jit\n",
+ "def train_step(inputs, velocity, label):\n",
+ " loss, grads = grad_fn(inputs, velocity, label)\n",
+ " if use_ascend:\n",
+ " loss = loss_scaler.unscale(loss)\n",
+ " if all_finite(grads):\n",
+ " grads = loss_scaler.unscale(grads)\n",
+ " loss = ops.depend(loss, optimizer(grads))\n",
+ " return loss\n",
+ "\n",
+ "@jit\n",
+ "def eval_step(inputs, velocity, label):\n",
+ " loss = forward_fn(inputs, velocity, label)\n",
+ " loss = ops.sqrt(loss)\n",
+ " return loss\n",
+ "\n",
+ "train_sink_process = data_sink(train_step, dataset_train, sink_size=1)\n",
+ "eval_sink_process = data_sink(eval_step, dataset_eval, sink_size=1)\n",
+ "train_data_size, eval_data_size = dataset_train.get_dataset_size(), dataset_eval.get_dataset_size()\n",
+ "\n",
+ "avg_train_losses = []\n",
+ "avg_valid_losses = []\n",
+ "\n",
+ "for epoch in range(1, optimizer_params[\"epochs\"] + 1):\n",
+ " train_losses = 0\n",
+ " valid_losses = 0\n",
+ "\n",
+ " local_time_beg = time.time()\n",
+ " model.set_train(True)\n",
+ "\n",
+ " for _ in range(train_data_size):\n",
+ " step_train_loss = ops.squeeze(train_sink_process(), axis=())\n",
+ " step_train_loss = step_train_loss.asnumpy().item()\n",
+ " train_losses += step_train_loss\n",
+ "\n",
+ " train_loss = train_losses / train_data_size\n",
+ " avg_train_losses.append(train_loss)\n",
+ "\n",
+ " print(f\"epoch: {epoch}, epoch average train loss: {train_loss :.6f}, \"\n",
+ " f\"epoch time: {(time.time() - local_time_beg):.2f}s\")\n",
+ "\n",
+ " if epoch % optimizer_params[\"eval_interval\"] == 0:\n",
+ " print(f\"=================Start Evaluation=====================\")\n",
+ "\n",
+ " eval_time_beg = time.time()\n",
+ " model.set_train(False)\n",
+ " for _ in range(eval_data_size):\n",
+ " step_eval_loss = ops.squeeze(eval_sink_process(), axis=())\n",
+ " step_eval_loss = step_eval_loss.asnumpy().item()\n",
+ " valid_losses += step_eval_loss\n",
+ "\n",
+ " valid_loss = valid_losses / eval_data_size\n",
+ " avg_valid_losses.append(valid_loss)\n",
+ "\n",
+ " print(f\"epoch: {epoch}, epoch average valid loss: {valid_loss :.6f}, \"\n",
+ " f\"epoch time: {(time.time() - eval_time_beg):.2f}s\")\n",
+ " print(f\"==================End Evaluation======================\")\n",
+ "\n",
+ " if epoch % optimizer_params[\"save_ckpt_interval\"] == 0:\n",
+ " save_checkpoint(model, f\"{ckpt_dir}/net_{epoch}.ckpt\")\n",
+ "\n",
+ "save_loss_curve(avg_train_losses, 'Epoch', 'avg_train_losses', 'Avg_train_losses Curve', 'Avg_train_losses.png')\n",
+ "save_loss_curve(avg_valid_losses, 'Epoch', 'avg_valid_losses', 'Avg_valid_losses Curve', 'Avg_valid_losses.png')\n",
+ "\n",
+ "print(f\"=====================End train========================\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "b1262b22",
+ "metadata": {},
+ "source": [
+ "## 设置训练条件 传参\n",
+ "\n",
+ "当运行该文件时,通过参数解析器传入必要参数,开始训练,并打印进程和设备id,以及训练总时间"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "505f3e5b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "if __name__ == \"__main__\":\n",
+ " print(\"Process ID:\", os.getpid())\n",
+ " print(f\"device id: {args.device_id}\")\n",
+ " start_time = time.time()\n",
+ " train()\n",
+ " print(f\"End-to-End total time: {(time.time() - start_time):.2f}s\")"
+ ]
+ },
+ {
+ "cell_type": "raw",
+ "id": "4bcfdbdd",
+ "metadata": {},
+ "source": [
+ "Process ID: 2801010\n",
+ "device id: 0\n",
+ "==================Load data sample ===================\n",
+ "======================End Load========================\n",
+ "\n",
+ "====================Start train=======================\n",
+ "epoch: 1, epoch average train loss: 0.069304, epoch time: 51.62s\n",
+ "epoch: 2, epoch average train loss: 0.011798, epoch time: 24.36s\n",
+ "epoch: 3, epoch average train loss: 0.010980, epoch time: 16.55s\n",
+ "epoch: 4, epoch average train loss: 0.010644, epoch time: 24.14s\n",
+ "epoch: 5, epoch average train loss: 0.010608, epoch time: 22.38s\n",
+ "epoch: 6, epoch average train loss: 0.010324, epoch time: 21.66s\n",
+ "epoch: 7, epoch average train loss: 0.010152, epoch time: 32.79s\n",
+ "epoch: 8, epoch average train loss: 0.009601, epoch time: 24.62s\n",
+ "epoch: 9, epoch average train loss: 0.009147, epoch time: 22.19s\n",
+ "epoch: 10, epoch average train loss: 0.008809, epoch time: 19.52s\n",
+ "=================Start Evaluation=====================\n",
+ "epoch: 10, epoch average valid loss: 0.098904, epoch time: 12.86s\n",
+ "==================End Evaluation======================\n",
+ "\n",
+ "...\n",
+ "\n",
+ "epoch: 91, epoch average train loss: 0.000274, epoch time: 28.49s\n",
+ "epoch: 92, epoch average train loss: 0.000280, epoch time: 27.60s\n",
+ "epoch: 93, epoch average train loss: 0.000231, epoch time: 20.99s\n",
+ "epoch: 94, epoch average train loss: 0.000297, epoch time: 18.26s\n",
+ "epoch: 95, epoch average train loss: 0.000417, epoch time: 21.94s\n",
+ "epoch: 96, epoch average train loss: 0.000228, epoch time: 27.41s\n",
+ "epoch: 97, epoch average train loss: 0.000232, epoch time: 18.61s\n",
+ "epoch: 98, epoch average train loss: 0.000250, epoch time: 26.81s\n",
+ "epoch: 99, epoch average train loss: 0.000217, epoch time: 21.16s\n",
+ "epoch: 100, epoch average train loss: 0.000244, epoch time: 18.09s\n",
+ "=================Start Evaluation=====================\n",
+ "epoch: 100, epoch average valid loss: 0.015813, epoch time: 15.06s\n",
+ "==================End Evaluation======================\n",
+ "=====================End train========================\n",
+ "End-to-End total time: 2575.05s"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "25aac646",
+ "metadata": {},
+ "source": [
+ "## 预测流场结果可视化\n",
+ "\n",
+ "+ 动边界流场预测通过执行eval.py开始预测,分为两种预测方式:单步流场预测(infer_mode为\"one\")和一个振动周期内连续流场预测(infer_mode为\"cycle\");单步流场预测仅预测下一时刻一个时间步长的流场,连续流场预测则持续预测一个完整周期的流场\n",
+ "+ 下图为训练完备的HDNN模型实现对振动频率为1.43Hz,振幅为0.8(振幅比泛化状态)下非定常动边界单步预测和一完整周期预测的结果(展示压强场、水平速度场和竖直速度场)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d229664b",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "50e40259",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.16"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/src/__init__.py b/MindFlow/applications/data_driven/move_boundary_hdnn/src/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bbb9f003688f3127ead5506c2a3acb69e2b9faa
--- /dev/null
+++ b/MindFlow/applications/data_driven/move_boundary_hdnn/src/__init__.py
@@ -0,0 +1,27 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""init"""
+from .dataset import TrainDatasetSource, my_train_dataset, my_test_dataset
+from .model import AEnet
+from .utils import save_loss_curve
+
+__all__ = [
+ "TrainDatasetSource",
+ "my_train_dataset",
+ "my_test_dataset",
+ "AEnet",
+ "save_loss_curve"
+]
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/src/conv_lstm.py b/MindFlow/applications/data_driven/move_boundary_hdnn/src/conv_lstm.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd824e4e117e56d999b6f245e558a724b2af1309
--- /dev/null
+++ b/MindFlow/applications/data_driven/move_boundary_hdnn/src/conv_lstm.py
@@ -0,0 +1,208 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""hybrid deep neural network structure"""
+from mindspore import nn, ops, numpy, float32
+
+
+class ConvLSTMCell(nn.Cell):
+ """
+ A single ConvLSTM cell: the input and the hidden state are concatenated, convolved and batch-normalized, and the result drives the LSTM gating operations
+ """
+
+ def __init__(self, input_dim, hidden_dim, kernel_size, bias):
+ """
+ Initialize ConvLSTM cell.
+ Parameters
+ ----------
+ input_dim: int
+ Number of channels of input tensor.
+ hidden_dim: int
+ Number of channels of hidden state.
+ kernel_size: (int, int)
+ Size of the convolutional kernel.
+ bias: bool
+ Whether or not to add the bias.
+ """
+ super(ConvLSTMCell, self).__init__()
+
+ self.input_dim = input_dim
+ self.hidden_dim = hidden_dim
+ self.kernel_size = kernel_size
+ self.bias = bias
+
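+ # a single convolution over the concatenated [input, hidden] tensor produces all four gate pre-activations (4 * hidden_dim channels)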
+ self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
+ out_channels=4 * self.hidden_dim,
+ kernel_size=self.kernel_size,
+ stride=1,
+ pad_mode="same",
+ padding=0,
+ has_bias=self.bias,
+ data_format="NCHW")
+ self.norm = nn.BatchNorm2d(4 * self.hidden_dim)
+
+ def construct(self, input_tensor, cur_state):
+ """
+ Concatenate input_tensor with the current hidden state, apply convolution and batch normalization, then perform the LSTM gating operations
+ """
+ h_cur, c_cur = cur_state
+
+ combined = ops.concat(input_x=(input_tensor, h_cur), axis=1)
+ combined_conv = self.conv(combined)
+ combined_conv = self.norm(combined_conv)
+ cc_i, cc_f, cc_o, cc_g = ops.split(input_x=combined_conv, axis=1, output_num=4)
+
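+ # standard LSTM gating: input (i), forget (f) and output (o) gates plus the candidate state (g)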
+ i = ops.sigmoid(cc_i)
+ f = ops.sigmoid(cc_f)
+ o = ops.sigmoid(cc_o)
+ g = ops.tanh(cc_g)
+
+ c_next = f * c_cur + i * g
+ h_next = o * ops.tanh(c_next)
+
+ return h_next, c_next
+
+ def init_hidden(self, batch_size, image_size, h_ini):
+ """
+ Initialize the state tensors for the first time step: the hidden state is filled from h_ini
+ and the cell state is initialized with zeros
+ Parameters
+ ----------
+ batch_size: int
+ Minimum batch size of trained samples
+ image_size: tuple of size[H,W]
+ Height and width of data images
+ h_ini: Tensor
+ Value used to fill the initial hidden state (in this application, the structure velocity passed in by the caller)
+ """
+ height, width = image_size
+ h_ini = numpy.reshape(h_ini, (batch_size, 1, 1, 1))
+ h_ini = numpy.broadcast_to(h_ini, (batch_size, self.hidden_dim, height, width))
+
+ init_h = h_ini * numpy.ones(shape=(batch_size, self.hidden_dim, height, width)).astype(float32)
+ init_c = numpy.zeros(shape=(batch_size, self.hidden_dim, height, width)).astype(float32)
+
+ return (init_h, init_c)
+
+
+class ConvLSTM(nn.Cell):
+ """
+ Parameters:
+ input_dim: Number of channels in input
+ hidden_dim: Number of hidden channels
+ kernel_size: Size of kernel in convolutions
+ num_layers: Number of LSTM layers stacked on each other
+ batch_first: Whether or not dimension 0 is the batch or not
+ bias: Bias or no bias in Convolution
+ Input:
+ A tensor of size [B, T, C, H, W] or [T, B, C, H, W]
+ Output:
+ layer_output_list--size=[B,T,hidden_dim,H,W]
+ last_state_list--h.size=c.size = [B,hidden_dim,H,W]
+ A tuple of two lists of length num_layers .
+ 0 - layer_output_list is the list of lists of length T of each output
+ 1 - last_state_list is the list of last states
+ each element of the list is a tuple (h, c) for hidden state and memory
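+ Example:
+ >> convlstm = ConvLSTM(input_dim=128, hidden_dim=128, kernel_size=(3, 3), num_layers=1, batch_first=True, bias=True)  # the configuration used by AEnet in model.py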
+ """
+
+ def __init__(self, input_dim, hidden_dim, kernel_size, num_layers, batch_first=False, bias=True):
+ super(ConvLSTM, self).__init__()
+
+ self._check_kernel_size_consistency(kernel_size)
+ kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
+ hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
+
+ if not len(kernel_size) == len(hidden_dim) == num_layers:
+ raise ValueError('Inconsistent list length.')
+
+ self.input_dim = input_dim
+ self.hidden_dim = hidden_dim
+ self.kernel_size = kernel_size
+ self.num_layers = num_layers
+ self.batch_first = batch_first
+ self.bias = bias
+
+ cell_list = []
+ for i in range(0, self.num_layers):
+ cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
+ cell_list.append(ConvLSTMCell(input_dim=cur_input_dim,
+ hidden_dim=self.hidden_dim[i],
+ kernel_size=self.kernel_size[i],
+ bias=self.bias))
+ self.cell_list = nn.CellList(cell_list)
+
+ @staticmethod
+ def _check_kernel_size_consistency(kernel_size):
+ """Check that kernel_size is a tuple, or a list of tuples with one entry per layer"""
+ if not (isinstance(kernel_size, tuple) or
+ (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
+ raise ValueError('`kernel_size` must be tuple or list of tuples')
+
+ @staticmethod
+ def _extend_for_multilayer(param, num_layers):
+ """Expanding to multi-layer LSTM scenarios"""
+ if not isinstance(param, list):
+ param = [param] * num_layers
+ return param
+
+ def construct(self, input_tensor, h0):
+ """
+ Parameters
+ ----------
+ input_tensor: 5-D Tensor either of shape (t, b, c, h, w) or (b, t, c, h, w)
+ h0: Tensor
+ Value used to initialize the hidden state of every layer (see ConvLSTMCell.init_hidden)
+ Returns
+ -------
+ layer_output_list, last_state_list
+ """
+ if not self.batch_first:
+ input_tensor = input_tensor.permute(1, 0, 2, 3, 4)
+
+ b1, _, _, h1, w1 = input_tensor.shape
+ hidden_state = self._init_hidden(batch_size=b1, image_size=(h1, w1), h_ini=h0)
+
+ layer_output_list = []
+ last_state_list = []
+
+ seq_len = input_tensor.shape[1]
+ cur_layer_input = input_tensor
+
+ for layer_idx in range(self.num_layers):
+ h, c = hidden_state[layer_idx]
+ output_inner = []
+ for t in range(seq_len):
+ h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :], cur_state=[h, c])
+ output_inner.append(h)
+
+ layer_output = ops.stack(output_inner, axis=1)
+ cur_layer_input = layer_output
+
+ layer_output_list.append(layer_output)
+ last_state_list.append([h, c])
+
+ return layer_output_list, last_state_list
+
+ def _init_hidden(self, batch_size, image_size, h_ini):
+ """
+ Initialize the first-time-step states of all ConvLSTM layers
+ Parameters
+ ----------
+ batch_size: int
+ Minimum batch size of trained samples
+ image_size: tuple of size[H,W]
+ Height and width of data images
+ h_ini: Tensor
+ Value used to fill the initial hidden state of every layer
+ """
+ init_states = []
+ for i in range(self.num_layers):
+ init_states.append(self.cell_list[i].init_hidden(batch_size, image_size, h_ini))
+ return init_states
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/src/dataset.py b/MindFlow/applications/data_driven/move_boundary_hdnn/src/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba5fedf2d2922c207de1939254a9bb6e358cd42e
--- /dev/null
+++ b/MindFlow/applications/data_driven/move_boundary_hdnn/src/dataset.py
@@ -0,0 +1,159 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Loading data and create dataset"""
+from collections import namedtuple
+
+import numpy as np
+import h5py
+
+import mindspore.dataset as ds
+
+
+class TrainDatasetSource:
+ """
+ Indexing the dataset based on data_dir and dataset_list, processing the dataset and returning train_dataset,
+ train_velocity, valid_dataset, valid_velocity
+ Parameters:
+ data_dir: Path address of the dataset
+ dataset_list: The train data list:['0.00', '0.25', '0.35', '0.45']
+ """
+
+ def __init__(self, data_dir, dataset_list, ratio=0.8):
+ self.data_dir = data_dir
+ self.dataset_list = dataset_list
+ self.ratio = ratio
+
+ def train_data(self):
+ """ data for train"""
+ train_dataset = []
+ valid_dataset = []
+ train_velocity = []
+ valid_velocity = []
+ for i in self.dataset_list:
+ data_source = h5py.File(f"{self.data_dir}/f0.90h{i}/project/total_puv_project.mat")
+ data_sample = data_source['total_puv'][:]
+ data_sample = np.array(data_sample.transpose([0, 3, 1, 2]), np.float32)
+
+ data_length = data_sample.shape[0]
+ train_dataset.append(data_sample[0:int(data_length * self.ratio)])
+ valid_dataset.append(data_sample[int(data_length * self.ratio):])
+
+ data_source = h5py.File(f"{self.data_dir}/f0.90h{i}/project/velocity.mat")
+ data_velocity = data_source['velocity'][:]
+ data_velocity = np.array(data_velocity, np.float32)
+
+ train_velocity.append(data_velocity[0:int(data_length * self.ratio)])
+ valid_velocity.append(data_velocity[int(data_length * self.ratio):])
+
+ DatasetResult = namedtuple('DatasetResult',
+ ['train_dataset', 'train_velocity', 'valid_dataset', 'valid_velocity'])
+
+ return DatasetResult(train_dataset, train_velocity, valid_dataset, valid_velocity)
+
+
+class TrainDatasetMake:
+ """
+ Build the training dataset from dataset, velocity, time_steps and dataset_list so that samples can be retrieved by index.
+ Parameters:
+ dataset: Train data and valid data
+ velocity: The speed of the moving structure
+ time_steps: The number of time steps to predict
+ dataset_list: The data list
+ """
+
+ def __init__(self, dataset, velocity, time_steps, dataset_list):
+ self.dataset = dataset
+ self.velocity = velocity
+ self.time_steps = time_steps
+ self.dataset_numbers = len(dataset_list)
+
+ def __len__(self):
+ return (len(self.dataset[0]) - self.time_steps) * self.dataset_numbers
+
+ def __getitem__(self, idx):
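+ # idx selects both the motion state and the position of the sliding window: a sample is time_steps
+ # consecutive snapshots with their velocities, labelled by the snapshot immediately after the window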
+ idx_dataset = idx // (len(self.dataset[0]) - self.time_steps)
+ idx = idx % (len(self.dataset[0]) - self.time_steps)
+
+ return self.dataset[idx_dataset][idx:idx + self.time_steps], \
+ self.velocity[idx_dataset][idx:idx + self.time_steps], \
+ self.dataset[idx_dataset][idx + self.time_steps]
+
+
+def my_train_dataset(data_dir, time_steps, dataset_list):
+ """Process the data under data_dir according to time_steps and dataset_list, and generate train_dataset and valid_dataset"""
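+ # example call (the path and time_steps below are illustrative; the real values come from config.yaml):
+ #     my_train_dataset('./dataset', 16, ['0.00', '0.25', '0.35', '0.45'])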
+ train_data, train_velocity, valid_data, valid_velocity = TrainDatasetSource(data_dir, dataset_list).train_data()
+
+ train_dataset = TrainDatasetMake(train_data, train_velocity, time_steps, dataset_list)
+ train_dataset = ds.GeneratorDataset(train_dataset, ["inputs", "v", "labels"], shuffle=True)
+ train_dataset = train_dataset.batch(batch_size=16, drop_remainder=True)
+
+ valid_dataset = TrainDatasetMake(valid_data, valid_velocity, time_steps, dataset_list)
+ valid_dataset = ds.GeneratorDataset(valid_dataset, ["inputs", "v", "labels"], shuffle=False)
+ valid_dataset = valid_dataset.batch(batch_size=16, drop_remainder=True)
+
+ return train_dataset, valid_dataset
+
+
+class TestDatasetMake:
+ """
+ Build the test dataset from dataset, velocity, matrix_01 and time_steps so that samples can be retrieved by index.
+ Parameters:
+ dataset: Train data and valid data
+ velocity: The speed of the moving structure
+ matrix_01: The matrix of test data, 4-D logical. Each element is a Boolean value
+ time_steps: The number of time steps to predict
+ """
+
+ def __init__(self, dataset, velocity, matrix_01, time_steps):
+ self.dataset = dataset
+ self.velocity = velocity
+ self.matrix_01 = matrix_01
+ self.time_steps = time_steps
+
+ def __len__(self):
+ return len(self.dataset) - self.time_steps
+
+ def __getitem__(self, idx):
+ test_input = self.dataset[idx:idx + self.time_steps]
+ test_velocity = self.velocity[idx:idx + self.time_steps]
+ test_label = self.dataset[idx + self.time_steps]
+ test_matrix_01 = self.matrix_01[idx + self.time_steps]
+
+ TestDatasetResult = namedtuple('TestDatasetResult',
+ ['test_input', 'test_velocity', 'test_label', 'test_matrix_01'])
+
+ return TestDatasetResult(test_input, test_velocity, test_label, test_matrix_01)
+
+
+def my_test_dataset(data_dir, time_steps):
+ """Process the data under data_dir according to time_steps and generate test_dataset"""
+ data_source = h5py.File(f"{data_dir}/project/total_puv_project.mat")
+ data_sample = data_source['total_puv'][0:10]
+ test_data = np.array(data_sample.transpose([0, 3, 1, 2]), np.float32)
+
+ data_source = h5py.File(f"{data_dir}/project/velocity.mat")
+ data_sample = data_source['velocity'][0:10]
+ test_velocity = np.array(data_sample, np.float32)
+
+ data_source = h5py.File(f"{data_dir}/project/Matrix_01.mat")
+ data_sample = data_source['Matrix'][0:10]
+ data_matrix_01 = np.array(data_sample.transpose([0, 3, 1, 2]), np.float32)
+
+ test_dataset = TestDatasetMake(test_data, test_velocity, data_matrix_01, time_steps)
+ test_dataset = ds.GeneratorDataset(test_dataset, ["input", "velocity", "label", "matrix_01"], shuffle=False)
+ test_dataset = test_dataset.batch(batch_size=1, drop_remainder=True)
+
+ return test_dataset
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/src/model.py b/MindFlow/applications/data_driven/move_boundary_hdnn/src/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff614ff4d0e257e098fb91ca7dc22f5c713869bc
--- /dev/null
+++ b/MindFlow/applications/data_driven/move_boundary_hdnn/src/model.py
@@ -0,0 +1,151 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""hybrid deep neural network structure"""
+from mindspore import nn, ops
+
+from .conv_lstm import ConvLSTM
+
+
+class Encoder(nn.Cell):
+ """The Convolutional layer (12 layers) of Hybrid Deep Neural Network
+
+ Args:
+ in_channels (int): The number of channels in the input space.
+ num_layers (int): The number of Convolutional layer.
+ kernel_size(int): The size of Convolutional kernel in Convolutional layer.
+ has_bias(bool): Whether set bias for Convolutional layer.
+ weight_init(str): The way to perform weight initialization operation.
+ Input:
+ A tensor of size [B, C, H, W] = [16, 3, 192, 128]
+ Output:
+ A tensor of size [B, C, H, W] = [16, 128, 3, 2]
+ Example:
+ >> encoder = Encoder(in_channels=3, num_layers=12, kernel_size=4, has_bias=True, weight_init='XavierUniform')
+ """
+
+ def __init__(self, in_channels, num_layers, kernel_size, has_bias=True, weight_init='XavierUniform',
+ activation=nn.LeakyReLU()):
+ super(Encoder, self).__init__()
+
+ layers = []
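+ # odd-numbered layers downsample with stride 2 and raise the channel count (3 -> 4 -> 8 -> ... -> 128);
+ # even-numbered layers are stride-1 refinements, so a 3 x 192 x 128 input becomes a 128 x 3 x 2 latent map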
+ for num in range(1, num_layers + 1):
+ if num == 1:
+ layers.extend([nn.Conv2d(in_channels, 2 ** (num + 1), kernel_size, stride=2, padding=0, pad_mode='same',
+ has_bias=has_bias, weight_init=weight_init, data_format='NCHW'), activation])
+ elif num % 2 == 0:
+ layers.extend([nn.Conv2d(int(2 ** (num / 2 + 1)), int(2 ** (num / 2 + 1)), kernel_size - 1, stride=1,
+ padding=0, pad_mode='same', has_bias=has_bias, weight_init=weight_init,
+ data_format='NCHW'), activation])
+ elif num % 2 == 1:
+ layers.extend([nn.Conv2d(int(2 ** ((num + 1) / 2)), int(2 ** ((num + 3) / 2)), kernel_size, stride=2,
+ padding=0, pad_mode='same', has_bias=has_bias, weight_init=weight_init,
+ data_format='NCHW'), activation])
+ self.convlayers = nn.SequentialCell(layers)
+
+ def construct(self, x):
+ x = self.convlayers(x)
+ return x
+
+
+class Decoder(nn.Cell):
+ """The DeConvolutional layer (12 layers) of Hybrid Deep Neural Network
+
+ Args:
+ in_channels (int): The number of channels in the input space.
+ num_layers (int): The number of DeConvolutional layer.
+ kernel_size(int): The size of DeConvolutional kernel in DeConvolutional layer.
+ weight_init(str): The way to perform weight initialization operation.
+ Input:
+ A tensor of size [B, C, H, W] = [16, 128, 3, 2]
+ Output:
+ A tensor of size [B, C, H, W] = [16, 3, 192, 128]
+
+ Example:
+ >> Decoder = Decoder(in_channels=128, num_layers=12, kernel_size=4, weight_init='XavierUniform')
+ """
+
+ def __init__(self, in_channels, num_layers, kernel_size, weight_init='XavierUniform', activation=nn.LeakyReLU()):
+ super(Decoder, self).__init__()
+
+ layers = []
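+ # mirror of the Encoder: odd-numbered layers upsample with stride-2 transposed convolutions while reducing
+ # the channels (128 -> 64 -> ... -> 4 -> in_channels); even-numbered layers and the final layer are stride-1 convolutions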
+ for num in range(1, num_layers + 1):
+ if num == num_layers:
+ layers.extend(
+ [nn.Conv2d(in_channels, in_channels, kernel_size + 1, weight_init=weight_init, stride=1,
+ pad_mode='same', padding=0), activation])
+ elif num == num_layers - 1:
+ layers.extend([nn.Conv2dTranspose(in_channels + 1, in_channels, kernel_size, stride=2, pad_mode='same',
+ padding=0), activation])
+ elif num % 2 == 1:
+ layers.extend([nn.Conv2dTranspose(int(2 ** ((15 - num) / 2)), int(2 ** ((13 - num) / 2)), kernel_size,
+ stride=2, padding=0, pad_mode='same', weight_init=weight_init),
+ activation])
+ elif num % 2 == 0:
+ layers.extend([nn.Conv2d(int(2 ** ((14 - num) / 2)), int(2 ** ((14 - num) / 2)), kernel_size - 1,
+ stride=1, padding=0, pad_mode='same', weight_init=weight_init), activation])
+ self.deconv_layers = nn.SequentialCell(layers)
+
+ def construct(self, x):
+ x = self.deconv_layers(x)
+ return x
+
+
+class AEnet(nn.Cell):
+ r"""
+ A Hybrid Deep Neural Network Composed of Convolutional Layer, ConvLSTM, and Deconvolutional Layer
+
+ Args:
+ in_channels (int): The number of channels in the input space.
+ num_layers (int): The number of Convolutional and DeConvolutional layer.
+ kernel_size(int): The size of convolutional kernel in Convolutional and DeConvolutional layer.
+ num_convlstm_layers (int): The number of ConvLSTM Layer.
+
+ Inputs:
+ - **input** (Tensor) - Tensor of shape :math:`(*, in\_channels)`.
+
+ Outputs:
+ Tensor of shape :math:`(*, in\_channels)`.
+ """
+
+ def __init__(self,
+ in_channels,
+ num_layers,
+ kernel_size,
+ num_convlstm_layers):
+ super(AEnet, self).__init__()
+ self.encoder = Encoder(in_channels=in_channels, num_layers=num_layers, kernel_size=kernel_size)
+ self.convlstm = ConvLSTM(input_dim=128, hidden_dim=128, kernel_size=(3, 3), num_layers=num_convlstm_layers,
+ batch_first=True, bias=True)
+ self.decoder = Decoder(in_channels=in_channels, num_layers=num_layers, kernel_size=kernel_size)
+
+ def construct(self, x, velocity):
+ """
+ Unpack the five-dimensional input x, fold the time axis into the batch axis and feed the snapshots to the
+ convolutional encoder; reshape the encoder output back into a sequence and send it, together with velocity,
+ to the ConvLSTM; finally pass the last hidden state to the deconvolutional decoder and return the reconstructed flow field
+ """
+ b, t, c, h, w = x.shape
+
+ con_in = ops.reshape(x, (b * t, c, h, w))
+ con_out = self.encoder(con_in)
+
+ lstm_in = ops.reshape(con_out, (b, t, con_out.shape[1], con_out.shape[2], con_out.shape[3]))
+ _, last_states = self.convlstm(lstm_in, velocity)
+ lstm_out = last_states[0][0]
+
+ out = self.decoder(lstm_out)
+
+ return out
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/src/utils.py b/MindFlow/applications/data_driven/move_boundary_hdnn/src/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..64200086c06a8dbf82d5f1b679f9b865771e60f9
--- /dev/null
+++ b/MindFlow/applications/data_driven/move_boundary_hdnn/src/utils.py
@@ -0,0 +1,27 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""draw the curve of the loss value changing with epoch, set the coordinate axis name, title and save the path"""
+import matplotlib.pyplot as plt
+
+
+def save_loss_curve(losses, xlabel, ylabel, title, save_path):
+ """draw and save curves of training loss and testing loss"""
+ plt.plot(losses)
+ plt.xlabel(xlabel)
+ plt.ylabel(ylabel)
+ plt.title(title)
+ plt.savefig(save_path)
+ plt.close()
diff --git a/MindFlow/applications/data_driven/move_boundary_hdnn/train.py b/MindFlow/applications/data_driven/move_boundary_hdnn/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ad6e7546795c9f418d9dedd7c58c5e36045d60e
--- /dev/null
+++ b/MindFlow/applications/data_driven/move_boundary_hdnn/train.py
@@ -0,0 +1,186 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""train process"""
+import os
+import time
+import argparse
+import numpy as np
+
+from mindspore import nn, ops, context, save_checkpoint, set_seed, data_sink, jit
+
+from mindflow.utils import load_yaml_config
+
+from src import my_train_dataset, AEnet, save_loss_curve
+
+np.random.seed(0)
+set_seed(0)
+
+
+def train():
+ """train process"""
+ # prepare params
+ config = load_yaml_config(args.config_file_path)
+ data_params = config["data"]
+ model_params = config["model"]
+ optimizer_params = config["optimizer"]
+
+ # prepare file to save the trained model files
+ ckpt_dir = optimizer_params["ckpt_dir"]
+ if not os.path.exists(ckpt_dir):
+ os.mkdir(ckpt_dir)
+
+ # prepare the model to be trained, as well as the loss function (MSE) and the optimizer (Adam)
+ model = AEnet(in_channels=model_params["in_channels"],
+ num_layers=model_params["num_layers"],
+ kernel_size=model_params["kernel_size"],
+ num_convlstm_layers=model_params["num_convlstm_layers"])
+ loss_func = nn.MSELoss()
+ optimizer = nn.Adam(params=model.trainable_params(), learning_rate=optimizer_params["lr"])
+
+ # when training on Ascend, introduce a dynamic loss scaler and automatic mixed precision
+ if use_ascend:
+ from mindspore.amp import DynamicLossScaler, auto_mixed_precision, all_finite
+ loss_scaler = DynamicLossScaler(1024, 2, 100)
+ auto_mixed_precision(model, 'O1')
+ else:
+ loss_scaler = None
+
+ # define a forward propagation function
+ def forward_fn(inputs, velocity, label):
+ pred = model(inputs, velocity)
+ loss = loss_func(pred, label)
+
+ if use_ascend:
+ loss = loss_scaler.scale(loss)
+ return loss
+
+ # build a function that returns both the value and the gradients of forward_fn
+ grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=False)
+
+ # prepare dataset
+ print(f"==================Load data sample ===================")
+ dataset_train, dataset_eval = my_train_dataset(data_params["data_dir"],
+ data_params["time_steps"],
+ args.data_list)
+ print(f"======================End Load========================\n")
+
+ print(f"====================Start train=======================")
+
+ # define the training step, decorated with @jit for graph compilation: it computes the loss and gradients
+ # and applies the optimizer update
+ @jit
+ def train_step(inputs, velocity, label):
+ loss, grads = grad_fn(inputs, velocity, label)
+ if use_ascend:
+ loss = loss_scaler.unscale(loss)
+ if all_finite(grads):
+ grads = loss_scaler.unscale(grads)
+ loss = ops.depend(loss, optimizer(grads))
+ return loss
+
+ @jit
+ def eval_step(inputs, velocity, label):
+ loss = forward_fn(inputs, velocity, label)
+ loss = ops.sqrt(loss)
+ return loss
+
+    # define train_sink_process and eval_sink_process, which fetch data from the dataset
+    # and feed it into the training and evaluation steps
+ train_sink_process = data_sink(train_step, dataset_train, sink_size=1)
+ eval_sink_process = data_sink(eval_step, dataset_eval, sink_size=1)
+ train_data_size, eval_data_size = dataset_train.get_dataset_size(), dataset_eval.get_dataset_size()
+
+ # average training loss per epoch
+ avg_train_losses = []
+ # average validation loss per epoch
+ avg_valid_losses = []
+
+ # start epoch training
+ for epoch in range(1, optimizer_params["epochs"] + 1):
+ train_losses = 0
+ valid_losses = 0
+
+ local_time_beg = time.time()
+ model.set_train(True)
+
+ for _ in range(train_data_size):
+ step_train_loss = ops.squeeze(train_sink_process(), axis=())
+ step_train_loss = step_train_loss.asnumpy().item()
+ train_losses += step_train_loss
+
+ train_loss = train_losses / train_data_size
+ avg_train_losses.append(train_loss)
+
+ print(f"epoch: {epoch}, epoch average train loss: {train_loss :.6f}, "
+ f"epoch time: {(time.time() - local_time_beg):.2f}s")
+
+ if epoch % optimizer_params["eval_interval"] == 0:
+ print(f"=================Start Evaluation=====================")
+
+ eval_time_beg = time.time()
+ model.set_train(False)
+ for _ in range(eval_data_size):
+ step_eval_loss = ops.squeeze(eval_sink_process(), axis=())
+ step_eval_loss = step_eval_loss.asnumpy().item()
+ valid_losses += step_eval_loss
+
+ valid_loss = valid_losses / eval_data_size
+ avg_valid_losses.append(valid_loss)
+
+ print(f"epoch: {epoch}, epoch average valid loss: {valid_loss :.6f}, "
+ f"epoch time: {(time.time() - eval_time_beg):.2f}s")
+ print(f"==================End Evaluation======================")
+
+ # save the ckpt file of the trained model in the folder
+ if epoch % optimizer_params["save_ckpt_interval"] == 0:
+ save_checkpoint(model, f"{ckpt_dir}/net_{epoch}.ckpt")
+
+ # draw and save curves of training loss and testing loss
+ save_loss_curve(avg_train_losses, 'Epoch', 'avg_train_losses', 'Avg_train_losses Curve', 'Avg_train_losses.png')
+ save_loss_curve(avg_valid_losses, 'Epoch', 'avg_valid_losses', 'Avg_valid_losses Curve', 'Avg_valid_losses.png')
+
+ print(f"=====================End train========================")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="cylinder around flow ROM")
+
+ parser.add_argument("--mode", type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"],
+ help="Context mode, support 'GRAPH', 'PYNATIVE'")
+ parser.add_argument("--save_graphs", type=bool, default=False, choices=[True, False],
+ help="Whether to save intermediate compilation graphs")
+ parser.add_argument("--save_graphs_path", type=str, default="./summary")
+ parser.add_argument("--device_target", type=str, default="GPU", choices=["GPU", "Ascend"],
+ help="The target device to run, support 'GPU','Ascend'")
+ parser.add_argument("--device_id", type=int, default=0, help="ID of the target device")
+ parser.add_argument("--data_list", type=list, default=['0.00', '0.25', '0.35', '0.45'],
+ help="The type for training, [0.00, 0.25, 0.35, 0.45] for multi_state training /n"
+ "[0.25],....,[0.45] for single_state training")
+ parser.add_argument('--batch_size', type=int, default=16, help="batch size")
+ parser.add_argument("--config_file_path", type=str, default="./config.yaml")
+
+ args = parser.parse_args()
+
+ context.set_context(mode=context.GRAPH_MODE if args.mode.upper().startswith("GRAPH") else context.PYNATIVE_MODE,
+ save_graphs=args.save_graphs, save_graphs_path=args.save_graphs_path,
+ device_target=args.device_target, device_id=args.device_id)
+ use_ascend = context.get_context(attr_key='device_target') == "Ascend"
+
+ print("Process ID:", os.getpid())
+ print(f"device id: {args.device_id}")
+ start_time = time.time()
+ train()
+ print(f"End-to-End total time: {(time.time() - start_time):.2f}s")
diff --git a/MindFlow/applications/data_driven/navier_stokes_kno/train.py b/MindFlow/applications/data_driven/navier_stokes_kno/train.py
index 62eeb7d71c8c8ac86de0c45646b6986a7bd9c950..2274ce1524cff82af989db6bf7c424279901346e 100644
--- a/MindFlow/applications/data_driven/navier_stokes_kno/train.py
+++ b/MindFlow/applications/data_driven/navier_stokes_kno/train.py
@@ -128,14 +128,17 @@ def main():
time_beg = time.time()
train_recons_full = 0.0
train_pred_full = 0.0
+ train_full = 0.0
for _ in range(train_size):
- _, l_recons, l_pred = train_sink()
+ l_full, l_recons, l_pred = train_sink()
train_recons_full += l_recons.asnumpy()
train_pred_full += l_pred.asnumpy()
+ train_full += l_full.asnumpy()
train_recons_full = train_recons_full / train_size
train_pred_full = train_pred_full / train_size
- print(f"epoch: {epoch}, time cost: {(time.time() - time_beg):>8f},"
- f" recons loss: {train_recons_full:>8f}, pred loss: {train_pred_full:>8f}")
+ train_full = train_full / train_size
+ print(f"epoch: {epoch}, time cost: {(time.time() - time_beg):>8f}s,"
+ f" recons loss: {train_recons_full:>8f}, pred loss: {train_pred_full:>8f}, Total loss: {train_full:>8f}")
if epoch % config['eval_interval'] == 0:
l_recons_all, l_pred_all = problem.test(test_input, test_label)
diff --git a/MindFlow/applications/data_driven/transonic_buffet_ehdnn/src/postprocess.py b/MindFlow/applications/data_driven/transonic_buffet_ehdnn/src/postprocess.py
index 74e0e99bb75d236e81c75f985dca51cbeeca16a8..d2d14319d1ed41da0e1f0efeb53e4282ccbae142 100644
--- a/MindFlow/applications/data_driven/transonic_buffet_ehdnn/src/postprocess.py
+++ b/MindFlow/applications/data_driven/transonic_buffet_ehdnn/src/postprocess.py
@@ -17,18 +17,18 @@
import os
import numpy as np
-import matplotlib.pyplot as plot
+import matplotlib.pyplot as plt
from scipy.interpolate import griddata
from scipy.io import loadmat
def plot_train_loss(train_loss, plot_dir, epochs):
"""Plot change of loss during training"""
- t_loss = plot.scatter(list(range(epochs)), train_loss, s=0.2)
- plot.xlabel('epoch')
- plot.ylabel('train_loss')
- plot.legend(t_loss, 'train', loc='upper right')
- plot.savefig(f'{plot_dir}/train_loss.png')
+ t_loss = plt.scatter(list(range(epochs)), train_loss, s=0.2)
+ plt.xlabel('epoch')
+ plt.ylabel('train_loss')
+ plt.legend([t_loss], ['train'], loc='upper right')
+ plt.savefig(f'{plot_dir}/train_loss.png')
class PostProcess:
diff --git a/MindFlow/applications/data_driven/transonic_buffet_ehdnn/train.py b/MindFlow/applications/data_driven/transonic_buffet_ehdnn/train.py
index a7f6da32eaf51b856df4ae28cb7096e2f75f5853..0747ef7b7666004a472b2e579872d5e9544e5ff2 100644
--- a/MindFlow/applications/data_driven/transonic_buffet_ehdnn/train.py
+++ b/MindFlow/applications/data_driven/transonic_buffet_ehdnn/train.py
@@ -82,7 +82,7 @@ def train():
if use_ascend:
from mindspore.amp import DynamicLossScaler, auto_mixed_precision, all_finite
loss_scaler = DynamicLossScaler(1024, 2, 100)
- auto_mixed_precision('O3')
+ auto_mixed_precision(model, 'O3')
else:
loss_scaler = None
diff --git a/MindFlow/applications/data_mechanism_fusion/PerCNN/README.md b/MindFlow/applications/data_mechanism_fusion/PerCNN/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/MindFlow/applications/data_mechanism_fusion/PerCNN/loss.npy b/MindFlow/applications/data_mechanism_fusion/PerCNN/loss.npy
new file mode 100644
index 0000000000000000000000000000000000000000..6dfa40319fb9b3d2a17a8ac1eb2d877ce78dd154
Binary files /dev/null and b/MindFlow/applications/data_mechanism_fusion/PerCNN/loss.npy differ
diff --git a/MindFlow/applications/data_mechanism_fusion/PerCNN/model/checkpoint.pt b/MindFlow/applications/data_mechanism_fusion/PerCNN/model/checkpoint.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5be1048e99b1009dadd99730ce4284ad3d367d66
Binary files /dev/null and b/MindFlow/applications/data_mechanism_fusion/PerCNN/model/checkpoint.pt differ
diff --git a/MindFlow/applications/data_mechanism_fusion/PerCNN/model/checkpointpre_train.ckpt b/MindFlow/applications/data_mechanism_fusion/PerCNN/model/checkpointpre_train.ckpt
new file mode 100644
index 0000000000000000000000000000000000000000..f51611e4ee64e87852dc522e51750bce5cc9dea3
Binary files /dev/null and b/MindFlow/applications/data_mechanism_fusion/PerCNN/model/checkpointpre_train.ckpt differ
diff --git a/MindFlow/applications/data_mechanism_fusion/PerCNN/model/checkpointtrain.ckpt b/MindFlow/applications/data_mechanism_fusion/PerCNN/model/checkpointtrain.ckpt
new file mode 100644
index 0000000000000000000000000000000000000000..fc71b970470950adb063b5b91a6df080fc0c3a99
Binary files /dev/null and b/MindFlow/applications/data_mechanism_fusion/PerCNN/model/checkpointtrain.ckpt differ
diff --git a/MindFlow/applications/data_mechanism_fusion/PerCNN/percnn_burgers.yaml b/MindFlow/applications/data_mechanism_fusion/PerCNN/percnn_burgers.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9aaec130a0814bf089b8402532bfcf3174129021
--- /dev/null
+++ b/MindFlow/applications/data_mechanism_fusion/PerCNN/percnn_burgers.yaml
@@ -0,0 +1,13 @@
+pretrain:
+  epochs: 6000
+  name_conf: "pre_train"
+  learning_rate: 0.001
+
+train:
+  epochs: 15000
+  name_conf: "train"
+  learning_rate: 2e-3
+  gama: 0.96
+
+fig_save_path: "./figures_ms/"
+ckpt_file_name: "./model/checkpointtrain.ckpt"
\ No newline at end of file
diff --git a/MindFlow/applications/data_mechanism_fusion/PerCNN/src/__init__.py b/MindFlow/applications/data_mechanism_fusion/PerCNN/src/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..876400b6836bda54022f48555f5e3fcd14658487
--- /dev/null
+++ b/MindFlow/applications/data_mechanism_fusion/PerCNN/src/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""init"""
+from .constant import dx_2d_op, dy_2d_op, lap_2d_op
+from .model import RCNN
+from .tools import post_process_v2
+from .trainer import Trainer
+
+__all__ = [
+ "dx_2d_op",
+ "dy_2d_op",
+ "lap_2d_op",
+ "RCNN",
+ "Trainer",
+ "post_process_v2",
+]
diff --git a/MindFlow/applications/data_mechanism_fusion/PerCNN/src/constant.py b/MindFlow/applications/data_mechanism_fusion/PerCNN/src/constant.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bf53112263e3dad38f76c30499e0bfa2cf4308f
--- /dev/null
+++ b/MindFlow/applications/data_mechanism_fusion/PerCNN/src/constant.py
@@ -0,0 +1,33 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""constants"""
+dx_2d_op = [[[[0, 0, 1/12, 0, 0],
+ [0, 0, -8/12, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 8/12, 0, 0],
+ [0, 0, -1/12, 0, 0]]]]
+
+dy_2d_op = [[[[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [1/12, -8/12, 0, 8/12, -1/12],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0]]]]
+
+lap_2d_op = [[[[0, 0, -1/12, 0, 0],
+ [0, 0, 4/3, 0, 0],
+ [-1/12, 4/3, - 5, 4/3, -1/12],
+ [0, 0, 4/3, 0, 0],
+ [0, 0, -1/12, 0, 0]]]]
diff --git a/MindFlow/applications/data_mechanism_fusion/PerCNN/src/model.py b/MindFlow/applications/data_mechanism_fusion/PerCNN/src/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..758367d884676c785060d6114c93ad47f22088d3
--- /dev/null
+++ b/MindFlow/applications/data_mechanism_fusion/PerCNN/src/model.py
@@ -0,0 +1,197 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""model"""
+import numpy as np
+from mindspore import nn, ops, Tensor, Parameter, float32
+
+from .constant import lap_2d_op
+
+
+class UpScaler(nn.Cell):
+ ''' Upscaler (ISG) to convert low-res to high-res initial state '''
+
+    def __init__(self, in_channels, out_channels, hidden_channels, kernel_size, stride, has_bias):
+        super(UpScaler, self).__init__()
+        self.up0 = nn.Conv2dTranspose(in_channels, hidden_channels, kernel_size=kernel_size, pad_mode='pad',
+                                      padding=kernel_size // 2, stride=stride,
+                                      has_bias=has_bias)
+        self.pad = nn.Pad(
+            paddings=((0, 0), (0, 0), (0, 1), (0, 1)), mode="CONSTANT")
+        self.conv = nn.Conv2d(in_channels=hidden_channels, out_channels=hidden_channels, kernel_size=kernel_size,
+                              pad_mode="same", has_bias=has_bias)
+        # 1x1 layer
+        self.out = nn.Conv2d(hidden_channels, out_channels,
+                             kernel_size=1, pad_mode="valid", has_bias=has_bias)
+
+ def construct(self, x):
+ x = self.up0(x)
+ x = self.pad(x)
+ x = self.conv(x)
+ x = ops.tanh(x)
+ x = self.out(x)
+ return x
+
+
+class RecurrentCNNCell(nn.Cell):
+ ''' Recurrent convolutional neural network Cell '''
+
+ def __init__(self, input_channels, hidden_channels, kernel_size, compute_dtype):
+ super(RecurrentCNNCell, self).__init__()
+
+ # the initial parameters, output channel is always 1
+ self.input_channels = input_channels
+ self.hidden_channels = hidden_channels
+ self.kernel_size = kernel_size
+ self.input_stride = 1
+ self.compute_dtype = compute_dtype
+
+ self.dx = 1.0/100.0
+ self.dt = 0.00025
+ # nu from 0 to upper bound (two times the estimate)
+ self.nu_up = 0.0108
+
+ # Design the laplace_u term
+ self.ca = Parameter(
+ Tensor(np.random.rand(), dtype=self.compute_dtype), requires_grad=True)
+ self.cb = Parameter(
+ Tensor(np.random.rand(), dtype=self.compute_dtype), requires_grad=True)
+
+ # padding_mode='replicate' not working for the test
+ laplace = np.array(lap_2d_op)
+ self.w_laplace = Tensor(1/self.dx**2*laplace, dtype=self.compute_dtype)
+
+ # Parallel layer for u
+ self.wh0_u = nn.Conv2d(in_channels=input_channels, out_channels=hidden_channels, kernel_size=kernel_size,
+ stride=self.input_stride, pad_mode="valid", has_bias=True,)
+ self.wh1_u = nn.Conv2d(in_channels=input_channels, out_channels=hidden_channels, kernel_size=kernel_size,
+ stride=self.input_stride, pad_mode="valid", has_bias=True,)
+ self.wh2_u = nn.Conv2d(in_channels=input_channels, out_channels=hidden_channels, kernel_size=kernel_size,
+ stride=self.input_stride, pad_mode="valid", has_bias=True,)
+ self.wh3_u = nn.Conv2d(in_channels=input_channels, out_channels=hidden_channels, kernel_size=kernel_size,
+ stride=self.input_stride, pad_mode="valid", has_bias=True,)
+ # 1x1 layer for u
+ self.wh4_u = nn.Conv2d(in_channels=hidden_channels, out_channels=1, kernel_size=1,
+ stride=1, has_bias=True)
+ # Parallel layer for v
+ self.wh0_v = nn.Conv2d(in_channels=input_channels, out_channels=hidden_channels, kernel_size=kernel_size,
+ stride=self.input_stride, pad_mode="valid", has_bias=True,)
+ self.wh1_v = nn.Conv2d(in_channels=input_channels, out_channels=hidden_channels, kernel_size=kernel_size,
+ stride=self.input_stride, pad_mode="valid", has_bias=True,)
+ self.wh2_v = nn.Conv2d(in_channels=input_channels, out_channels=hidden_channels, kernel_size=kernel_size,
+ stride=self.input_stride, pad_mode="valid", has_bias=True,)
+ self.wh3_v = nn.Conv2d(in_channels=input_channels, out_channels=hidden_channels, kernel_size=kernel_size,
+ stride=self.input_stride, pad_mode="valid", has_bias=True,)
+ # 1x1 layer for v
+ self.wh4_v = nn.Conv2d(in_channels=hidden_channels, out_channels=1, kernel_size=1,
+ stride=1, has_bias=True)
+
+        # initialize the filters' weights and biases
+ self.filter_list = [self.wh0_u, self.wh1_u, self.wh2_u, self.wh3_u, self.wh4_u,
+ self.wh0_v, self.wh1_v, self.wh2_v, self.wh3_v, self.wh4_v,]
+
+    def init_filter(self, filter_list, c):
+        '''
+        :param filter_list: list of conv filters to initialize
+        :param c: constant multiplied on the Xavier-style uniform bound
+        '''
+        for f in filter_list:
+            bound = c * np.sqrt(1 / np.prod(f.weight.shape[:-1]))
+            f.weight.set_data(Tensor(
+                np.random.uniform(-bound, bound, f.weight.shape), dtype=self.compute_dtype))
+            if f.bias is not None:
+                f.bias.set_data(Tensor(np.zeros(f.bias.shape), dtype=self.compute_dtype))
+
+ def construct(self, h):
+ """construct function of RecurrentCNNCell"""
+ # manual periodic padding for diffusion conv layers (5x5 filters)
+ h_pad_2 = ops.concat(
+ (h[:, :, :, -2:], h, h[:, :, :, 0:2]), axis=3)
+ h_pad_2 = ops.concat(
+ (h_pad_2[:, :, -2:, :], h_pad_2, h_pad_2[:, :, 0:2, :]), axis=2)
+ u_pad_2 = h_pad_2[:, 0:1, ...] # 104x104
+ v_pad_2 = h_pad_2[:, 1:2, ...]
+ # previous state
+ u_prev = h[:, 0:1, ...] # 100x100
+ v_prev = h[:, 1:2, ...]
+
+ u_res = self.nu_up*ops.sigmoid(self.ca)*ops.conv2d(u_pad_2, self.w_laplace) + self.wh4_u(
+ self.wh0_u(h_pad_2)*self.wh1_u(h_pad_2)*self.wh2_u(h_pad_2)*self.wh3_u(h_pad_2))
+ v_res = self.nu_up*ops.sigmoid(self.cb)*ops.conv2d(v_pad_2, self.w_laplace) + self.wh4_v(
+ self.wh0_v(h_pad_2)*self.wh1_v(h_pad_2)*self.wh2_v(h_pad_2)*self.wh3_v(h_pad_2))
+
+ u_next = u_prev + u_res * self.dt
+ v_next = v_prev + v_res * self.dt
+ ch = ops.concat((u_next, v_next), axis=1)
+
+ return ch, ch
+
+
+class RCNN(nn.Cell):
+ ''' Recurrent convolutional neural network layer '''
+
+ def __init__(self, input_channels, hidden_channels, input_kernel_size,
+ infer_step=1, effective_step=None, compute_dtype=float32):
+ super(RCNN, self).__init__()
+
+ # input channels of layer includes input_channels and hidden_channels of cells
+ self.input_channels = input_channels
+ self.hidden_channels = hidden_channels
+ self.input_kernel_size = input_kernel_size
+ self.step = infer_step + 1
+ self.effective_step = effective_step
+ self.compute_dtype = compute_dtype
+
+ # Upconv as initial state generator
+        self.upconv_block = UpScaler(in_channels=input_channels,
+                                     out_channels=2,
+                                     hidden_channels=8,
+                                     kernel_size=5,
+                                     stride=2,
+                                     has_bias=True)
+
+ self.cell = RecurrentCNNCell(input_channels=self.input_channels,
+ hidden_channels=self.hidden_channels,
+ kernel_size=self.input_kernel_size,
+ compute_dtype=self.compute_dtype)
+
+ def construct(self, init_state_low):
+ """construct function of RCNN"""
+ # We can freeze the IC or use UpconvBlock. UpconvBlock works slightly better but needs pretraining.
+ init_state = self.upconv_block(init_state_low)
+ internal_state = []
+ outputs = [init_state]
+ second_last_state = []
+
+ for step in range(self.step):
+ # all cells are initialized in the first step
+ if step == 0:
+ h = init_state
+ internal_state = h
+
+ # forward
+ h = internal_state
+ # hidden state + output
+ h, o = self.cell(h)
+ internal_state = h
+
+ if step == (self.step - 2):
+ # last output is a dummy for central FD
+ second_last_state = internal_state.copy()
+
+ # after many layers output the result save at time step t
+ if step in self.effective_step:
+ outputs.append(o)
+
+ return outputs, second_last_state
diff --git a/MindFlow/applications/data_mechanism_fusion/PerCNN/src/tools.py b/MindFlow/applications/data_mechanism_fusion/PerCNN/src/tools.py
new file mode 100644
index 0000000000000000000000000000000000000000..a704bb66365e6a2ff116b632f1ae9d5f9754716c
--- /dev/null
+++ b/MindFlow/applications/data_mechanism_fusion/PerCNN/src/tools.py
@@ -0,0 +1,106 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""tools"""
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+def post_process_v2(output, truth, low_res, xmin, xmax, ymin, ymax, num, fig_save_path):
+ '''num: Number of time step'''
+ x = np.linspace(0, 1, 101)
+ y = np.linspace(0, 1, 101)
+ x_star, y_star = np.meshgrid(x, y)
+ u_low_res, v_low_res = low_res[num, 0, ...], low_res[num, 1, ...]
+ u_low_res, v_low_res = np.kron(u_low_res, np.ones((2, 2))), \
+ np.kron(v_low_res, np.ones((2, 2)))
+ u_low_res, v_low_res = np.concatenate((u_low_res, u_low_res[:, 0:1]), axis=1), \
+ np.concatenate((v_low_res, v_low_res[:, 0:1]), axis=1)
+ u_low_res, v_low_res = np.concatenate((u_low_res, u_low_res[0:1, :]), axis=0), \
+ np.concatenate((v_low_res, v_low_res[0:1, :]), axis=0)
+ u_star, v_star = truth[num, 0, ...], truth[num, 1, ...]
+ u_pred, v_pred = output[num, 0, :, :], output[num, 1, :, :]
+ #
+ fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(11, 7))
+ fig.subplots_adjust(hspace=0.25, wspace=0.25)
+ #
+ cf = ax[0, 0].scatter(x_star, y_star, c=u_pred, alpha=1.0,
+ edgecolors='none', cmap='RdYlBu', marker='s', s=4, vmin=-2, vmax=1.6)
+ ax[0, 0].axis('square')
+ ax[0, 0].set_xlim([xmin, xmax])
+ ax[0, 0].set_ylim([ymin, ymax])
+ ax[0, 0].set_xticks([])
+ ax[0, 0].set_yticks([])
+ ax[0, 0].set_title('u (PeCRNN)')
+ fig.colorbar(cf, ax=ax[0, 0], fraction=0.046, pad=0.04)
+ #
+ cf = ax[0, 1].scatter(x_star, y_star, c=u_star, alpha=1.0,
+ edgecolors='none', cmap='RdYlBu', marker='s', s=4, vmin=-2, vmax=1.6)
+ ax[0, 1].axis('square')
+ ax[0, 1].set_xlim([xmin, xmax])
+ ax[0, 1].set_ylim([ymin, ymax])
+ ax[0, 1].set_xticks([])
+ ax[0, 1].set_yticks([])
+ ax[0, 1].set_title('u (Ref.)')
+ fig.colorbar(cf, ax=ax[0, 1], fraction=0.046, pad=0.04)
+ #
+ cf = ax[0, 2].scatter(x_star, y_star, c=u_low_res, alpha=1.0,
+ edgecolors='none', cmap='RdYlBu', marker='s', s=4, vmin=-2, vmax=1.6)
+ ax[0, 2].axis('square')
+ ax[0, 2].set_xlim([xmin, xmax])
+ ax[0, 2].set_ylim([ymin, ymax])
+ ax[0, 2].set_xticks([])
+ ax[0, 2].set_yticks([])
+ ax[0, 2].set_title('u (Meas.)')
+ fig.colorbar(cf, ax=ax[0, 2], fraction=0.046, pad=0.04)
+ #
+ cf = ax[1, 0].scatter(x_star, y_star, c=v_pred, alpha=1.0, edgecolors='none',
+ cmap='RdYlBu', marker='s', s=4, vmin=-2.8, vmax=0.5)
+ ax[1, 0].axis('square')
+ ax[1, 0].set_xlim([xmin, xmax])
+ ax[1, 0].set_ylim([ymin, ymax])
+ ax[1, 0].set_xticks([])
+ ax[1, 0].set_yticks([])
+ ax[1, 0].set_title('v (PeCRNN)')
+ fig.colorbar(cf, ax=ax[1, 0], fraction=0.046, pad=0.04)
+ #
+ cf = ax[1, 1].scatter(x_star, y_star, c=v_star, alpha=1.0, edgecolors='none',
+ cmap='RdYlBu', marker='s', s=4, vmin=-2.8, vmax=0.5)
+ ax[1, 1].axis('square')
+ ax[1, 1].set_xlim([xmin, xmax])
+ ax[1, 1].set_ylim([ymin, ymax])
+ ax[1, 1].set_xticks([])
+ ax[1, 1].set_yticks([])
+ ax[1, 1].set_title('v (Ref.)')
+ fig.colorbar(cf, ax=ax[1, 1], fraction=0.046, pad=0.04)
+ #
+ cf = ax[1, 2].scatter(x_star, y_star, c=v_low_res, alpha=1.0,
+ edgecolors='none', cmap='RdYlBu', marker='s', s=4, vmin=-2.8, vmax=0.5)
+ ax[1, 2].axis('square')
+ ax[1, 2].set_xlim([xmin, xmax])
+ ax[1, 2].set_ylim([ymin, ymax])
+ ax[1, 2].set_xticks([])
+ ax[1, 2].set_yticks([])
+ ax[1, 2].set_title('v (Meas.)')
+ fig.colorbar(cf, ax=ax[1, 2], fraction=0.046, pad=0.04)
+ #
+ plt.savefig(fig_save_path + 'uv_comparison_'+str(num).zfill(3)+'.png')
+ plt.close('all')
+
+ pred = np.stack((u_pred, v_pred), axis=0).reshape(-1, 1)
+ label = np.stack((u_star, v_star), axis=0).reshape(-1, 1)
+ diff_norms = np.square(pred - label).sum()
+ label_norms = np.square(label).sum()
+ return diff_norms / label_norms
diff --git a/MindFlow/applications/data_mechanism_fusion/PerCNN/src/trainer.py b/MindFlow/applications/data_mechanism_fusion/PerCNN/src/trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..df74fe5e991d105271a6371c1a3e1f01eceff9fa
--- /dev/null
+++ b/MindFlow/applications/data_mechanism_fusion/PerCNN/src/trainer.py
@@ -0,0 +1,110 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""trainer"""
+import numpy as np
+import scipy.io as sio
+
+from mindspore import nn, ops, Tensor, float32, jit_class
+
+from .constant import dx_2d_op, dy_2d_op, lap_2d_op
+
+
+@jit_class
+class Trainer:
+ """Trainer"""
+
+ def __init__(self, model, time_steps=400, dx=1.0/100, dt=0.00025, nu=1/200, compute_dtype=float32):
+ self.model = model
+ self.loss = nn.MSELoss()
+ self.dx = dx
+ self.dt = dt
+ self.nu = nu
+ self.compute_dtype = compute_dtype
+ self.pec = 0.05
+
+ mat = sio.loadmat('./data/Burgers_2001x2x100x100_[dt=00025].mat')['uv']
+ self.truth_clean = mat[100:1901]
+ uv = self.add_noise(self.truth_clean)
+
+ self.truth = Tensor(uv[:time_steps+1], dtype=self.compute_dtype)
+ self.init_state_low = Tensor(
+ uv[0:1, :, ::2, ::2], dtype=self.compute_dtype)
+
+ self.dx_kernel = Tensor(np.array(dx_2d_op) /
+ self.dx, self.compute_dtype)
+ self.dy_kernel = Tensor(np.array(dy_2d_op) /
+ self.dx, self.compute_dtype)
+ self.lap_kernel = Tensor(
+ np.array(lap_2d_op) / self.dx**2, self.compute_dtype)
+
+ def add_noise(self, truth):
+ noise = np.random.normal(size=(truth.shape))
+ std = np.std(truth)
+ return truth + self.pec*std*noise
+
+ def get_ic_loss(self):
+ init_state_bicubic = ops.interpolate(
+ self.init_state_low, size=(100, 100), mode='bicubic')
+ ini_state_pred = self.model.upconv_block(self.init_state_low)
+ return self.loss(ini_state_pred, init_state_bicubic)
+
+ def get_phy_loss(self, output):
+ """calculate the phy loss"""
+ output = ops.concat(
+ (output[:, :, :, -2:], output, output[:, :, :, 0:3]), axis=3)
+ output = ops.concat(
+ (output[:, :, -2:, :], output, output[:, :, 0:3, :]), axis=2)
+
+ laplace_u = ops.conv2d(output[0:-2, 0:1, :, :], self.lap_kernel)
+ laplace_v = ops.conv2d(output[0:-2, 1:2, :, :], self.lap_kernel)
+
+ u_x = ops.conv2d(output[0:-2, 0:1, :, :], self.dx_kernel)
+ u_y = ops.conv2d(output[0:-2, 0:1, :, :], self.dy_kernel)
+ v_x = ops.conv2d(output[0:-2, 1:2, :, :], self.dx_kernel)
+ v_y = ops.conv2d(output[0:-2, 1:2, :, :], self.dy_kernel)
+
+ u_t = (output[1:-1, 0:1, 2:-2, 2:-2] -
+ output[0:-2, 0:1, 2:-2, 2:-2]) / self.dt
+ v_t = (output[1:-1, 1:2, 2:-2, 2:-2] -
+ output[0:-2, 1:2, 2:-2, 2:-2]) / self.dt
+
+ u = output[0:-2, 0:1, 2:-2, 2:-2]
+ v = output[0:-2, 1:2, 2:-2, 2:-2]
+
+ f_u = u_t - self.nu*laplace_u + u*u_x + v*u_y
+ f_v = v_t - self.nu*laplace_v + u*v_x + v*v_y
+
+ return self.loss(f_u, ops.zeros_like(f_u)) + self.loss(f_v, ops.zeros_like(f_v))
+
+ def get_loss(self):
+ """get loss"""
+ output, _ = self.model(self.init_state_low)
+ output = ops.concat(tuple(output), axis=0)
+
+ pred = output[0:-1:40, :, ::2, ::2]
+ gt = self.truth[::40, :, ::2, ::2]
+ idx = int(pred.shape[0] * 0.9)
+
+ pred_tra, pred_val = pred[:idx], pred[idx:] # prediction
+ gt_tra, gt_val = gt[:idx], gt[idx:] # ground truth
+
+ loss_data = self.loss(pred_tra, gt_tra)
+ loss_valid = self.loss(pred_val, gt_val)
+ loss_ic = self.get_ic_loss()
+
+ loss_phy = self.get_phy_loss(output)
+
+ return 1.0*loss_data + 2.0*loss_ic, loss_data, loss_ic, loss_phy, loss_valid
diff --git a/MindFlow/applications/data_mechanism_fusion/PerCNN/train.py b/MindFlow/applications/data_mechanism_fusion/PerCNN/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..f797645856777d9050f553560e94a4117844dc2d
--- /dev/null
+++ b/MindFlow/applications/data_mechanism_fusion/PerCNN/train.py
@@ -0,0 +1,166 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""train"""
+import argparse
+import time
+import numpy as np
+
+from mindspore import ops, context, nn, set_seed, save_checkpoint, jit, load_checkpoint, load_param_into_net
+import mindspore.common.dtype as mstype
+from mindflow.utils import load_yaml_config
+
+from src import RCNN
+from src import Trainer
+from src import post_process_v2
+
+set_seed(123456)
+np.random.seed(123456)
+
+parser = argparse.ArgumentParser(description="burgers train")
+parser.add_argument("--mode", type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"],
+ help="Running in GRAPH_MODE OR PYNATIVE_MODE")
+parser.add_argument("--save_graphs", type=bool, default=False, choices=[True, False],
+ help="Whether to save intermediate compilation graphs")
+parser.add_argument("--save_graphs_path", type=str, default="./graphs")
+parser.add_argument("--device_target", type=str, default="GPU", choices=["GPU", "Ascend"],
+ help="The target device to run, support 'Ascend', 'GPU'")
+parser.add_argument("--device_id", type=int, default=1,
+ help="ID of the target device")
+parser.add_argument("--config_file_path", type=str, default="./percnn_burgers.yaml")
+args = parser.parse_args()
+
+context.set_context(mode=context.GRAPH_MODE if args.mode.upper().startswith("GRAPH") else context.PYNATIVE_MODE,
+ save_graphs=args.save_graphs,
+ save_graphs_path=args.save_graphs_path,
+ device_target=args.device_target,
+ device_id=args.device_id,
+ max_call_depth=99999999)
+print(
+ f"Running in {args.mode.upper()} mode, using device id: {args.device_id}.")
+use_ascend = context.get_context(attr_key='device_target') == "Ascend"
+
+
+def pretrain(trainer):
+ """pretrain"""
+ pretrain_config = load_yaml_config(args.config_file_path)["pretrain"]
+ learning_rate = pretrain_config['learning_rate']
+ optimizer = nn.Adam(trainer.model.trainable_params(),
+ learning_rate=learning_rate)
+
+ def forward_fn():
+ return trainer.get_ic_loss()
+
+ grad_fn = ops.value_and_grad(
+ forward_fn, None, trainer.model.trainable_params(), has_aux=False)
+
+    @jit
+ def train_step():
+ loss, grads = grad_fn()
+ loss = ops.depend(loss, optimizer(grads))
+ return loss
+
+ for epoch in range(1, 1 + pretrain_config['epochs']):
+ time_beg = time.time()
+ model.set_train(True)
+ step_train_loss = train_step()
+ print(
+ f"epoch: {epoch} train loss: {step_train_loss} epoch time: {(time.time() - time_beg) :.3f} s")
+
+
+def train(trainer):
+ """train"""
+ train_config = load_yaml_config(args.config_file_path)["train"]
+
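+    # piecewise-constant schedule: the learning rate decays by a factor of `gama` every 100 steps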
+ milestone = list([100*(i + 1) for i in range(150)])
+ learning_rates = list(
+ [train_config['learning_rate']*train_config['gama']**i for i in range(150)])
+ step_lr = nn.piecewise_constant_lr(milestone, learning_rates)
+ optimizer = nn.Adam(trainer.model.trainable_params(),
+ learning_rate=step_lr)
+ best_loss = 100000
+
+ def forward_fn():
+ return trainer.get_loss()
+
+ grad_fn = ops.value_and_grad(
+ forward_fn, None, trainer.model.trainable_params(), has_aux=True)
+
+    @jit
+ def train_step():
+ res, grads = grad_fn()
+ res = ops.depend(res, optimizer(grads))
+ return res
+
+ for epoch in range(1, 1 + train_config['epochs']):
+ time_beg = time.time()
+ model.set_train(True)
+ step_train_loss, loss_data, loss_ic, loss_phy, loss_valid = train_step()
+ print(f"epoch: {epoch} train loss: {step_train_loss} ic_loss: {loss_ic} data_loss: {loss_data} \
+ val_loss: {loss_valid} phy_loss: {loss_phy} epoch time: {(time.time() - time_beg): .3f} s")
+ if step_train_loss < best_loss:
+ best_loss = step_train_loss
+ print('best loss', best_loss, 'save model')
+ save_checkpoint(model, "./model/checkpoint" +
+ train_config['name_conf'] + ".ckpt")
+
+
+if __name__ == '__main__':
+ INPUT_CHANNELS = 2
+ HIDDEN_CHANNELS = 8
+ INPUT_KERNEL_SIZE = 5
+ INFER_STEP = 1800
+ effective_step = list(range(0, INFER_STEP + 1))
+
+ model = RCNN(input_channels=INPUT_CHANNELS,
+ hidden_channels=HIDDEN_CHANNELS,
+ input_kernel_size=INPUT_KERNEL_SIZE,
+ infer_step=INFER_STEP,
+ effective_step=effective_step)
+
+ percnn_trainer = Trainer(model,
+ time_steps=INFER_STEP,
+ dx=1.0/100,
+ dt=0.00025,
+ nu=1/200,
+ compute_dtype=mstype.float32)
+
+ pretrain(percnn_trainer)
+ train(percnn_trainer)
+
+ config = load_yaml_config(args.config_file_path)
+ ckpt_file_name = config["ckpt_file_name"]
+ param_dict = load_checkpoint(ckpt_file_name)
+ load_param_into_net(model, param_dict)
+
+ output, _ = percnn_trainer.model(percnn_trainer.init_state_low)
+ output = ops.concat(output, axis=0)
+ output = ops.concat((output, output[:, :, :, 0:1]), axis=3)
+ output = ops.concat((output, output[:, :, 0:1, :]), axis=2)
+ truth_clean = np.concatenate(
+ (percnn_trainer.truth_clean, percnn_trainer.truth_clean[:, :, :, 0:1]), axis=3)
+ truth_clean = np.concatenate(
+ (truth_clean, truth_clean[:, :, 0:1, :]), axis=2)
+ low_res = percnn_trainer.truth[:, :, ::2, ::2]
+
+ output, low_res = output.asnumpy(), low_res.asnumpy()
+ fig_save_path = config["fig_save_path"]
+
+ print(output.shape, truth_clean.shape, low_res.shape)
+
+ for i in range(0, INFER_STEP + 1, 10):
+ err = post_process_v2(output, truth_clean, low_res, xmin=0, xmax=1, ymin=0, ymax=1,
+ num=i, fig_save_path=fig_save_path)
+ print(i, err)
diff --git a/MindFlow/applications/data_mechanism_fusion/TurbAI/Readme.md b/MindFlow/applications/data_mechanism_fusion/TurbAI/Readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..964526493ecfa488e7c2c7d9da1eeced80d22273
--- /dev/null
+++ b/MindFlow/applications/data_mechanism_fusion/TurbAI/Readme.md
@@ -0,0 +1,219 @@
+### 1. Overview
+
+The turbulence AI model is a high-precision AI simulation model for high-Reynolds-number problems in aerospace engineering, developed on Ascend AI and supported by the MindSpore fluid simulation suite. It establishes a large-scale parallel intelligent turbulence simulation method for large passenger aircraft wings and wing-body assemblies, greatly improving the efficiency and accuracy of traditional turbulence simulation; the absolute error of the predicted flow field is less than 5%, which meets the industrial standard.
+
+This tutorial introduces the research background and technical path of the turbulence AI model, and shows how to train the model with MindFlow. The trained model will be released in the Hongshan community.
+
+### 2. Background Introduction
+
+Since Prandtl proposed boundary layer theory in 1904, turbulence simulation has been studied for more than 100 years without an essential breakthrough. Although traditional RANS turbulence closure models have been widely used in aerospace and other engineering fields, their predictions are reasonably accurate only for attached flows at small angles of attack.
+
+For vortex-dominated and separation-dominated flow problems, such as aircraft maneuvering flight and control-law design at high angles of attack, accurate evaluation of the drag and noise of civil aircraft, and aerothermal and thermal-protection design of hypersonic vehicles, there is still no turbulence simulation method accurate enough for engineering use, so wind tunnel experiments and even flight tests remain necessary.
+
+However, DNS of complex turbulent flow fields still requires more than a trillion degrees of freedom, and traditional methods often take several months or even years; this has become an important bottleneck in the development of high-end equipment, facing the double challenge of computational efficiency and accuracy.
+
+Common turbulence models can be classified by the number of differential equations employed: zero-equation, one-equation, and two-equation models. Zero-equation models include the C-S model proposed by Cebeci and Smith and the B-L model proposed by Baldwin and Lomax. One-equation models fall into two types: the S-A (Spalart-Allmaras) model, developed from empirical and dimensional analysis of simple flows, and the Baldwin-Barth model, simplified from the two-equation model. Widely used two-equation models include the k-ε and k-ω models. In addition, there are Reynolds stress models.
+
+
+
+In recent years, with the significant improvement in computing and storage capabilities, AI technology has been widely applied across many fields. For turbulence problems, massive flow field data can be obtained through refined experimental measurements and high-resolution numerical simulations. Mining and analyzing these data with the computing power of high-performance computers and advanced machine learning techniques provides a new paradigm for building turbulence models and achieving accurate simulation. Developing AI turbulence models has become a new approach to reducing experimental risk and cost, improving simulation accuracy, and enhancing R&D efficiency.
+
+Based on this, Northwestern Polytechnical University and Huawei jointly developed a turbulence AI model based on MindSpore and Ascend, and verified it on two-dimensional airfoils, three-dimensional wings, and wing-body combinations. The model can achieve efficient and high-precision inference of the flow field when the geometric shape and inflow parameters (angle of attack/Mach number/Reynolds number) change within a certain range. It is bi-directionally coupled with CFD solution software to accurately predict the flow field, pressure/drag distribution, and lift/drag.
+
+### 3. Technical Difficulties
+
+The challenges faced by turbulence modeling mainly include the following aspects:
+
+Firstly, **the scale disparity of flow field variables caused by high Reynolds numbers.** From the wall to the outer edge of the boundary layer, the turbulent eddy viscosity grows from almost zero to several thousand times the laminar viscosity, resulting in a large spread in numerical magnitude. Moreover, because the shear strain rate near the wall is large, higher accuracy is required for the eddy viscosity there. Under the classical mean-square-error loss, simply increasing the number of neurons has an unclear effect and easily causes overfitting, so using the raw eddy viscosity directly as the model output is not feasible.
+
+Secondly, **the generalization ability of the model.** The flow in the boundary layer is closely related to the Reynolds number, and a high Reynolds number implies strong nonlinearity among the flow field variables. Whether, and to what extent, the machine learning model can capture this nonlinearity is a key factor affecting generalization, which requires a modeling strategy carefully designed around the physical laws of the flow and the characteristics of the flow field. In addition, the construction and selection of the model input features, and their functional forms, also strongly affect generalization.
+
+Finally, **the convergence of the solver and the model after bidirectional coupling.** Anomalies and non-smoothness in the model output are unavoidable and reduce the convergence of the solver to some extent. In addition, high sensitivity to small changes in the input can cause residual oscillations, slow convergence, or even divergence.
+
+### 4. Technical Path
+
+
+
+As shown in the figure above, the work is divided into two parts: modeling and coupling. The modeling process includes data acquisition, data preprocessing, feature construction and selection, and network training. The coupling process replaces the original turbulence model with the trained DNN model and couples it to the CFD solver to participate in the flow field iteration, ultimately obtaining a converged flow field.
+
+**Feature construction and selection:**
+
+In feature construction, physically meaningful quantities are selected as model inputs, including the X-direction velocity, the vorticity norm, entropy, the strain rate, the wall distance, and transformed combinations of them. To ensure that the feature construction can be computed on Ascend, fp32 precision is used. Meanwhile, to improve the accuracy of the predicted turbulent viscosity coefficient in the near-wall region, the turbulent viscosity coefficient is scaled with respect to the wall distance:
+
+$$trans=e^{\sqrt{\frac{Re^{-0.56}}{dis}}}$$
+
+$$\mu_{T_{trans}} = \mu_T * trans$$
+
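+A minimal NumPy sketch of this scaling (the names `mu_t`, `dis`, and `re` are illustrative, not taken from the released code):
+
+```python
+import numpy as np
+
+def scale_eddy_viscosity(mu_t, dis, re):
+    """Scale the eddy viscosity by the wall-distance-based transformation above."""
+    trans = np.exp(np.sqrt(re ** (-0.56) / dis))
+    return mu_t * trans
+```
+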
+**Model design:**
+
+A fully connected neural network is chosen to predict the eddy viscosity coefficient. The network has four hidden layers with 128, 64, 64, and 64 neurons, respectively; ReLU is used as the activation function between layers, and mixed-precision training is enabled.
+
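+A minimal MindSpore sketch of such a network (the builder function and the input dimension `in_dim` are illustrative assumptions, not the released implementation):
+
+```python
+from mindspore import nn
+
+def build_turbulence_net(in_dim):
+    """Fully connected regressor: 4 hidden layers (128, 64, 64, 64) with ReLU."""
+    return nn.SequentialCell(
+        nn.Dense(in_dim, 128), nn.ReLU(),
+        nn.Dense(128, 64), nn.ReLU(),
+        nn.Dense(64, 64), nn.ReLU(),
+        nn.Dense(64, 64), nn.ReLU(),
+        nn.Dense(64, 1),
+    )
+```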
+
+
+The loss function of the model is as follows:
+
+$$Loss = L_{x0} + L_{x1} + L_{x2}$$
+
+$L_{x0}$ penalizes negative values so that the prediction is not less than 0: $L_{x0} = \overline{\left(\frac{|Pred|-Pred}{2.0}\right)^2}$
+
+$L_{x1}$ is the mean square error between the prediction and the label.
+
+$L_{x2}$ is the loss of the Reynolds stress near the wall.
+
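+A minimal sketch of this composite loss (the function name and the `l_rs` placeholder for the near-wall Reynolds-stress term are assumptions; its exact form is not given here):
+
+```python
+from mindspore import ops
+
+def turbulence_loss(pred, label, l_rs=0.0):
+    """Composite loss: negativity penalty + MSE + near-wall Reynolds-stress term."""
+    lx0 = ops.mean(((ops.abs(pred) - pred) / 2.0) ** 2)  # push predictions to be non-negative
+    lx1 = ops.mean((pred - label) ** 2)                  # mean square error against the label
+    lx2 = l_rs
+    return lx0 + lx1 + lx2
+```
+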
+The training batch size is 256 and the initial learning rate is 0.001; the learning rate is reduced dynamically as training proceeds and the error decreases. After about 300 epochs, the training and validation errors level off and stabilize at around 1e-5.
+
+We verified the model for varying operating conditions and geometries on three-dimensional meshes with millions of nodes.
+
+Based on the M6 wing, verification is carried out under varying angle of attack, Reynolds number, and Mach number:
+
+
+
+
+| Dataset | Ma    | AoA (deg) | Re (1e6) |
+| ------- | ----- | --------- | -------- |
+| train   | 0.76  | 1.25      | 11.71    |
+|         | 0.79  | 3.70      | 11.71    |
+| val     | 0.83  | 3.02      | 11.71    |
+| test    | 0.84  | 3.02      | 11.71    |
+|         | 0.699 | 3.06      | 11.76    |
+|         | 0.15  | 3.06      | 11.71    |
+
+
+
+The average error is about 2%:
+
+
+
+
+| Ma    | AoA (deg) | Re (1e6) | CL (SA) | CL (DNN) | CL error | CD (SA)  | CD (DNN) |
+| ----- | --------- | -------- | ------- | -------- | -------- | -------- | -------- |
+| 0.15  | 3.06      | 11.71    | 1.46e-1 | 1.49e-1  | 2.06%    | 8.64e-3  | 8.20e-3  |
+| 0.83  | 3.02      | 11.71    | 1.95e-1 | 1.96e-1  | 0.33%    | 1.23e-2  | 1.22e-2  |
+| 0.84  | 3.02      | 11.71    | 1.99e-1 | 2.01e-1  | 0.73%    | 1.32e-2  | 1.30e-2  |
+| 0.5   | 5         | 11.71    | 2.55e-1 | 2.58e-1  | 1.11%    | 1.36e-2  | 1.32e-2  |
+| 0.699 | 3.06      | 11.71    | 1.69e-1 | 1.73e-1  | 2.10%    | 9.744e-3 | 9.42e-3  |
+
+
+
+Ma=0.84, AoA=3.02°, Re=11.71e6 (cf and cp distributions at different cross-sections)
+
+
+
+Ma=0.83, AoA=3.02°, Re=11.71e6 (cf and cp distributions at different cross-sections)
+
+
+
+Trained on the M6, DPW-W1, F6, and other geometries, the model is then generalized to the DPW-W2 wing:
+
+Ma=0.76, AoA=1.25°, Re=5e6
+
+
"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ }
+ ],
+ "source": [
+ "from src.utils import calculate_l2_error, visual\n",
+ "\n",
+ "# Create the dataset\n",
+ "ds_test = create_test_dataset(config)\n",
+ "\n",
+ "# Evaluate the model\n",
+ "calculate_l2_error(model, ds_test)\n",
+ "\n",
+ "# Visual comparison of label and prediction\n",
+ "visual(model, ds_test)"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3.8.16 ('ms')",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.16"
+ },
+ "vscode": {
+ "interpreter": {
+ "hash": "c22ff8496cdfc43b41d028d0afe27e7d77fc6967d8e63387d8409afb66bbd90b"
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/MindFlow/applications/physics_driven/poisson_point_source/poisson_cfg.yaml b/MindFlow/applications/physics_driven/poisson_point_source/poisson_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2e1148f3abe531f8ac0e09ed04441e53ae79f84f
--- /dev/null
+++ b/MindFlow/applications/physics_driven/poisson_point_source/poisson_cfg.yaml
@@ -0,0 +1,28 @@
+rectangle: # whole solution region
+ coord_min: [0., 0.]
+ coord_max: [3.141592653589793, 3.141592653589793] # [pi, pi]
+rectangle_src: # source region
+ coord_min: [1.5307963267948965, 1.5307963267948965] # [pi/2 - 0.04, pi/2 - 0.04]
+ coord_max: [1.6107963267948966, 1.6107963267948966] # [pi/2 + 0.04, pi/2 + 0.04]
+data:
+ domain:
+ random_sampling: True
+ size: 1000000
+ # sampler: 'uniform'
+ sampler: 'lhs'
+ BC:
+ random_sampling: True
+ size: 1000000
+ # sampler: 'uniform'
+ sampler: 'lhs'
+batch_size: 5000
+optimizer:
+ initial_lr: 1.0e-3
+model:
+ in_channels: 2
+ out_channels: 1
+ layers: 5
+ neurons: 128
+ num_scales: 2
+ activation: "sin"
+keep_checkpoint_max: 2
diff --git a/MindFlow/applications/physics_driven/poisson_point_source/src/dataset.py b/MindFlow/applications/physics_driven/poisson_point_source/src/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..b60bbfdd3b1d2af626a3e6158d26af07c1407896
--- /dev/null
+++ b/MindFlow/applications/physics_driven/poisson_point_source/src/dataset.py
@@ -0,0 +1,71 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Create dataset."""
+import math
+import numpy as np
+
+import mindspore as ms
+from mindflow.data import Dataset
+from mindflow.geometry import Rectangle, generate_sampling_config
+
+
+def create_train_dataset(config, shuffle=True):
+ """Create training dataset."""
+ sampling_config = generate_sampling_config(config['data'])
+
+ # The feasible region
+ region = Rectangle("rectangle", **config['rectangle'], sampling_config=sampling_config)
+
+ # The region where the point source is located
+ region_src = Rectangle(
+ "rectangle_src", **config['rectangle_src'],
+ sampling_config=sampling_config)
+
+ dataset = Dataset({region: ['domain', 'BC'], region_src: ["domain"]})
+
+ ds_train = dataset.create_dataset(
+ batch_size=config['batch_size'], shuffle=shuffle, prebatched_data=True, drop_remainder=True
+ )
+
+ return ds_train
+
+
+def create_test_dataset(config, n_samps_per_axis=100):
+ """Create testing dataset."""
+ axis_x = np.linspace(
+ config['rectangle']['coord_min'][0],
+ config['rectangle']['coord_max'][0],
+ n_samps_per_axis,
+ endpoint=True)
+ axis_y = np.linspace(
+ config['rectangle']['coord_min'][1],
+ config['rectangle']['coord_max'][1],
+ n_samps_per_axis,
+ endpoint=True)
+ mesh_x, mesh_y = np.meshgrid(axis_x, axis_y)
+ mesh = np.stack((mesh_x.flatten(), mesh_y.flatten()), axis=-1)
+
+ label = np.zeros(mesh.shape[0], dtype=np.float32) # Calculate the analytical solution
+ truncation_number = 100
+ x_src, y_src = math.pi / 2, math.pi / 2 # Coordinate of the point source
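+    # Truncated double sine series of the analytical solution on [0, pi]^2 with zero Dirichlet boundaries:
+    #   u(x, y) = (4 / pi^2) * sum_{i, j} sin(i*x) * sin(i*x_src) * sin(j*y) * sin(j*y_src) / (i^2 + j^2)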
+ for i in range(1, truncation_number + 1):
+ for j in range(1, truncation_number + 1):
+ label += np.sin(i * mesh[:, 0]) * math.sin(i * x_src) * \
+ np.sin(j * mesh[:, 1]) * math.sin(j * y_src) / (i**2 + j**2)
+
+ label = label * 4.0 / (math.pi**2)
+
+ return (ms.Tensor(mesh, dtype=ms.float32), ms.Tensor(label, dtype=ms.float32))
diff --git a/MindFlow/applications/physics_driven/poisson_point_source/src/poisson.py b/MindFlow/applications/physics_driven/poisson_point_source/src/poisson.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3c14449075965c7aa9baa091c7f07c5d3175947
--- /dev/null
+++ b/MindFlow/applications/physics_driven/poisson_point_source/src/poisson.py
@@ -0,0 +1,67 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Define the Poisson equation."""
+import sympy
+from mindspore import numpy as ms_np
+from mindflow import PDEWithLoss, MTLWeightedLoss, sympy_to_mindspore
+
+
+class Poisson(PDEWithLoss):
+ """Define the loss of the Poisson equation."""
+
+ def __init__(self, model):
+ self.x, self.y = sympy.symbols("x y")
+ self.u = sympy.Function("u")(self.x, self.y)
+ self.in_vars = [self.x, self.y]
+ self.out_vars = [self.u,]
+ self.alpha = 0.01 # kernel width
+ super(Poisson, self).__init__(model, self.in_vars, self.out_vars)
+ self.bc_nodes = sympy_to_mindspore(self.bc(), self.in_vars, self.out_vars)
+ self.loss_fn = MTLWeightedLoss(num_losses=3)
+
+ def pde(self):
+ """Define the gonvering equation."""
+ uu_xx = sympy.diff(self.u, (self.x, 2))
+ uu_yy = sympy.diff(self.u, (self.y, 2))
+
+ # Use Laplace probability density function to approximate the Dirac \delta function.
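+        # delta(x - x_src) * delta(y - y_src) ~ 1 / (4 * alpha**2) * exp(-(|x - x_src| + |y - y_src|) / alpha)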
+ x_src = sympy.pi / 2
+ y_src = sympy.pi / 2
+ force_term = 0.25 / self.alpha**2 * sympy.exp(-(
+ sympy.Abs(self.x - x_src) + sympy.Abs(self.y - y_src)) / self.alpha)
+
+ poisson = uu_xx + uu_yy + force_term
+ equations = {"poisson": poisson}
+ return equations
+
+ def bc(self):
+ """Define the boundary condition."""
+ bc_eq = self.u
+
+ equations = {"bc": bc_eq}
+ return equations
+
+ def get_loss(self, pde_data, bc_data, src_data):
+ """Define the loss function."""
+ res_pde = self.parse_node(self.pde_nodes, inputs=pde_data)
+ res_bc = self.parse_node(self.bc_nodes, inputs=bc_data)
+ res_src = self.parse_node(self.pde_nodes, inputs=src_data)
+
+ loss_pde = ms_np.mean(ms_np.square(res_pde[0]))
+ loss_bc = ms_np.mean(ms_np.square(res_bc[0]))
+ loss_src = ms_np.mean(ms_np.square(res_src[0]))
+
+ return self.loss_fn((loss_pde, loss_bc, loss_src))
diff --git a/MindFlow/applications/physics_driven/poisson_point_source/src/utils.py b/MindFlow/applications/physics_driven/poisson_point_source/src/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..40548ec034975652be209034183a59ba149aadeb
--- /dev/null
+++ b/MindFlow/applications/physics_driven/poisson_point_source/src/utils.py
@@ -0,0 +1,92 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""utility functions"""
+import os
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib import gridspec
+from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size
+
+
+def plot_2d(u_label, u_predict, file_name=None):
+ """Draw the image containing the label and the prediction."""
+ u_error = np.abs(u_label - u_predict)
+
+ vmin_u = u_label.min()
+ vmax_u = u_label.max()
+ vmin_error = u_error.min()
+ vmax_error = u_error.max()
+ vmin = [vmin_u, vmin_u, vmin_error]
+ vmax = [vmax_u, vmax_u, vmax_error]
+
+ sub_titles = ["Reference", "Predict", "Error"]
+
+ plt.rcParams['figure.figsize'] = [9.6, 3.2]
+ fig = plt.figure()
+ gs_ = gridspec.GridSpec(2, 6)
+ slice_ = [gs_[0:2, 0:2], gs_[0:2, 2:4], gs_[0:2, 4:6]]
+ for i, data in enumerate([u_label, u_predict, u_error]):
+ ax_ = fig.add_subplot(slice_[i])
+
+ img = ax_.imshow(
+ data.T, vmin=vmin[i],
+ vmax=vmax[i],
+ cmap=plt.get_cmap("jet"),
+ origin='lower')
+
+ ax_.set_title(sub_titles[i], fontsize=10)
+ plt.xticks(())
+ plt.yticks(())
+
+ aspect = 20
+ pad_fraction = 0.5
+ divider = make_axes_locatable(ax_)
+ width = axes_size.AxesY(ax_, aspect=1 / aspect)
+ pad = axes_size.Fraction(pad_fraction, width)
+ cax = divider.append_axes("right", size=width, pad=pad)
+ cb_ = plt.colorbar(img, cax=cax)
+ cb_.ax.tick_params(labelsize=6)
+
+ gs_.tight_layout(fig, pad=1.0, w_pad=3.0, h_pad=1.0)
+ if file_name is None:
+ plt.show()
+ else:
+ os.makedirs("./images", exist_ok=True)
+ fig.savefig(os.path.join("./images", file_name))
+ plt.close()
+
+
+def visual(model, ds_test, n_samps_per_axis=100, file_name=None):
+ """Visual comparison of label and prediction"""
+ mesh, label = ds_test[0], ds_test[1]
+ pred = model(mesh).asnumpy()
+ label = label.asnumpy()
+ plot_2d(label.reshape(n_samps_per_axis, n_samps_per_axis),
+ pred.reshape(n_samps_per_axis, n_samps_per_axis),
+ file_name=file_name)
+
+
+def calculate_l2_error(model, ds_test):
+ """Calculate the relative L2 error."""
+ mesh, label = ds_test[0], ds_test[1]
+ pred = model(mesh).asnumpy().flatten()
+ label = label.asnumpy().flatten()
+
+ error_norm = np.linalg.norm(pred - label, ord=2)
+ label_norm = np.linalg.norm(label, ord=2)
+ relative_l2_error = error_norm / label_norm
+
+ print(f"Relative L2 error: {relative_l2_error:>8.4f}")
diff --git a/MindFlow/applications/physics_driven/poisson_point_source/train.py b/MindFlow/applications/physics_driven/poisson_point_source/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..d223ccdf328ec10fb24e46f460c5735059fbce9f
--- /dev/null
+++ b/MindFlow/applications/physics_driven/poisson_point_source/train.py
@@ -0,0 +1,145 @@
+# ============================================================================
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Training."""
+import os
+import time
+import argparse
+import numpy as np
+
+from src.dataset import create_train_dataset, create_test_dataset
+from src.poisson import Poisson
+from src.utils import calculate_l2_error, visual
+
+from mindspore import context, save_checkpoint, nn, ops, jit, set_seed
+from mindflow import load_yaml_config
+from mindflow.cell import MultiScaleFCSequential
+
+set_seed(123456)
+np.random.seed(123456)
+
+
+def train(file_cfg, ckpt_dir, n_epochs):
+ """Train a model."""
+ # Load config
+ config = load_yaml_config(file_cfg)
+
+ # Create the dataset
+ ds_train = create_train_dataset(config)
+ ds_test = create_test_dataset(config)
+
+ # Create the model
+ model = MultiScaleFCSequential(config['model']['in_channels'],
+ config['model']['out_channels'],
+ config['model']['layers'],
+ config['model']['neurons'],
+ residual=True,
+ act=config['model']['activation'],
+ num_scales=config['model']['num_scales'],
+ amp_factor=1.0,
+ scale_factor=2.0,
+ input_scale=[10., 10.],
+ )
+ print(model)
+
+ # Create the problem and optimizer
+ problem = Poisson(model)
+
+ params = model.trainable_params() + problem.loss_fn.trainable_params()
+ steps_per_epoch = ds_train.get_dataset_size()
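+    # Piecewise-constant learning-rate schedule: 10x decay at 40% and 60% of the planned training steps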
+ milestone = [int(steps_per_epoch * n_epochs * x) for x in [0.4, 0.6, 0.8]]
+ lr_init = config["optimizer"]["initial_lr"]
+ learning_rates = [lr_init * (0.1**x) for x in [0, 1, 2]]
+ lr_ = nn.piecewise_constant_lr(milestone, learning_rates)
+ optimizer = nn.Adam(params, learning_rate=lr_)
+
+ # Prepare loss scaler
+ if use_ascend:
+        from mindspore.amp import DynamicLossScaler, all_finite, auto_mixed_precision
+ loss_scaler = DynamicLossScaler(1024, 2, 100)
+ auto_mixed_precision(model, 'O3')
+ else:
+ loss_scaler = None
+
+ def forward_fn(pde_data, bc_data, src_data):
+ loss = problem.get_loss(pde_data, bc_data, src_data)
+ if use_ascend:
+ loss = loss_scaler.scale(loss)
+ return loss
+ grad_fn = ops.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=False)
+
+ @jit
+ def train_step(pde_data, bc_data, src_data):
+ loss, grads = grad_fn(pde_data, bc_data, src_data)
+ if use_ascend:
+ loss = loss_scaler.unscale(loss)
+ if all_finite(grads):
+ grads = loss_scaler.unscale(grads)
+ loss = ops.depend(loss, optimizer(grads))
+ return loss
+
+ def train_epoch(model, dataset, i_epoch):
+ local_time_beg = time.time()
+
+ model.set_train()
+ for _, (pde_data, bc_data, src_data) in enumerate(dataset):
+ loss = train_step(pde_data, bc_data, src_data)
+
+ print(
+ f"epoch: {i_epoch} train loss: {float(loss):.8f}" +
+ f" epoch time: {time.time() - local_time_beg:.2f}s")
+
+ keep_ckpt_max = config['keep_checkpoint_max']
+
+ for i_epoch in range(1, 1 + n_epochs):
+ train_epoch(model, ds_train, i_epoch)
+
+ # Save last checkpoints
+ save_name = os.path.join(ckpt_dir, f"epoch-{i_epoch % keep_ckpt_max}.ckpt")
+ save_checkpoint(model, save_name)
+
+ if i_epoch % 5 == 1 or i_epoch == n_epochs:
+ # Evaluate the model
+ calculate_l2_error(model, ds_test)
+
+ # Visual comparison of label and prediction
+ visual(model, ds_test, file_name=f"epoch-{i_epoch}_result.png")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="poisson")
+ parser.add_argument("--mode", type=str, default="GRAPH", choices=["GRAPH", "PYNATIVE"],
+ help="Running in GRAPH_MODE OR PYNATIVE_MODE")
+ parser.add_argument("--save_graphs", type=bool, default=False, choices=[True, False],
+ help="Whether to save intermediate compilation graphs")
+ parser.add_argument("--save_graphs_path", type=str, default="./graphs")
+ parser.add_argument("--device_target", type=str, default="GPU", choices=["GPU", "Ascend"],
+ help="The target device to run, support 'Ascend', 'GPU'")
+ parser.add_argument("--device_id", type=int, default=0, help="ID of the target device")
+ parser.add_argument('--ckpt_dir', default='./')
+ parser.add_argument('--n_epochs', default=250, type=int)
+ parser.add_argument("--config_file_path", type=str, default="./poisson_cfg.yaml")
+ args = parser.parse_args()
+
+ context.set_context(
+ mode=context.GRAPH_MODE if args.mode.upper().startswith("GRAPH") else context.PYNATIVE_MODE,
+ save_graphs=args.save_graphs, save_graphs_path=args.save_graphs_path,
+ device_target=args.device_target, device_id=args.device_id)
+ use_ascend = context.get_context(attr_key='device_target') == "Ascend"
+
+ print(f'pid: {os.getpid()}')
+ time_beg = time.time()
+ train(args.config_file_path, args.ckpt_dir, args.n_epochs)
+ print(f"End-to-End total time: {time.time() - time_beg:.1f} s")
diff --git a/MindFlow/applications/physics_driven/taylor_green/2d/taylor_green_2D_CN.ipynb b/MindFlow/applications/physics_driven/taylor_green/2d/taylor_green_2D_CN.ipynb
index 1ae6f7106b7353a060a901e8b157c3d97416c563..126f075914a19e23733665b5eaf42b64be04b105 100644
--- a/MindFlow/applications/physics_driven/taylor_green/2d/taylor_green_2D_CN.ipynb
+++ b/MindFlow/applications/physics_driven/taylor_green/2d/taylor_green_2D_CN.ipynb
@@ -302,10 +302,10 @@
" model.set_train(True)\n",
" for _ in range(steps_per_epochs):\n",
" step_train_loss = sink_process()\n",
+ " print(f\"epoch: {epoch} train loss: {step_train_loss} epoch time: {(time.time() - time_beg) * 1000 :.3f} ms\")\n",
" model.set_train(False)\n",
"\n",
" if epoch % config[\"eval_interval_epochs\"] == 0:\n",
- " print(f\"epoch: {epoch} train loss: {step_train_loss} epoch time: {(time.time() - time_beg) * 1000 :.3f} ms\")\n",
" calculate_l2_error(model, inputs, label, config)"
]
},
diff --git a/MindFlow/docs/partners/CACC.jpeg b/MindFlow/docs/partners/CACC.jpeg
index ff2b1966985da2e8b6c1286a8691be8cef6f02c4..f159c8a032346d725ea97090b7afd81a4141b5c7 100644
Binary files a/MindFlow/docs/partners/CACC.jpeg and b/MindFlow/docs/partners/CACC.jpeg differ
diff --git a/MindFlow/docs/partners/NorthwesternPolytechnical.jpeg b/MindFlow/docs/partners/NorthwesternPolytechnical.jpeg
index 9bae48001b1c32b9e6fce0dc207caff5f3ba6f06..d14dbfdf925d5976c389889dcbc107c5a831fdcb 100644
Binary files a/MindFlow/docs/partners/NorthwesternPolytechnical.jpeg and b/MindFlow/docs/partners/NorthwesternPolytechnical.jpeg differ
diff --git a/MindFlow/docs/partners/Peking_University.jpeg b/MindFlow/docs/partners/Peking_University.jpeg
index 2bdab7c00eb117c666f797145002056bd9833a48..7ca30a8f00de6ecf069f055f71b3ac9a5f772eac 100644
Binary files a/MindFlow/docs/partners/Peking_University.jpeg and b/MindFlow/docs/partners/Peking_University.jpeg differ
diff --git a/MindFlow/docs/partners/RenminUniversity.jpeg b/MindFlow/docs/partners/RenminUniversity.jpeg
index 2e761bb67ab9fbc7a5379badb654bd35af425dde..64c5f5490a004cb1a396b73d7dc5eb09ab0494a5 100644
Binary files a/MindFlow/docs/partners/RenminUniversity.jpeg and b/MindFlow/docs/partners/RenminUniversity.jpeg differ
diff --git a/MindFlow/docs/partners/TaiHuLaboratory.jpeg b/MindFlow/docs/partners/TaiHuLaboratory.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..c3778272e716deb2f65a9ca579b7cb2ed2175962
Binary files /dev/null and b/MindFlow/docs/partners/TaiHuLaboratory.jpeg differ
diff --git a/MindFlow/features/mindspore_grad_cookbook.ipynb b/MindFlow/features/mindspore_grad_cookbook.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..f0b932ea42c5bcd5fd6ac0c2aeb7cd5ded4ab122
--- /dev/null
+++ b/MindFlow/features/mindspore_grad_cookbook.ipynb
@@ -0,0 +1,858 @@
+{
+ "cells": [
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# MindSpore自动微分快速教程\n",
+ "\n",
+ "MindSpore拥有完善的自动微分系统。本文将会借着对自动微分思想的介绍来展示MindSpore自动微分的各项能力,方便读者运用在自己的项目中。\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import mindspore as ms\n",
+ "import numpy as np\n",
+ "import mindspore.ops as ops\n",
+ "from mindspore import context\n",
+ "from mindspore import Tensor\n",
+ "from mindspore import grad\n",
+ "from mindspore import dtype as mstype\n",
+ "\n",
+ "context.set_context(mode=ms.GRAPH_MODE)\n",
+ "np.random.seed(0)\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "0.070650816\n"
+ ]
+ }
+ ],
+ "source": [
+ "grad_tanh = grad(ops.tanh)\n",
+ "print(grad_tanh(Tensor(2, mstype.float32)))\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`grad` 的入参为一个函数,返回的是求导后的函数。定义一个Python函数`f`用来计算数学函数$f$,`grad(f)` 就是表达$\\nabla f$的Python函数。 `grad(f)(x)` 就是$\\nabla f(x)$的值。\n",
+ "\n",
+ "由于 `grad` 作用在函数上,所以`grad`也可以用来处理它自己的输出:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "-0.13621867\n",
+ "0.25265405\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(grad(grad(ops.tanh))(Tensor(2, mstype.float32)))\n",
+ "print(grad(grad(grad(ops.tanh)))(Tensor(2, mstype.float32)))\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "下面是一个计算线性回归模型的梯度的例子,首先:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def sigmoid(x):\n",
+ " return 0.5 * (ops.tanh(x / 2) + 1)\n",
+ "\n",
+ "# Outputs probability of a label being true.\n",
+ "\n",
+ "\n",
+ "def predict(W, b, inputs):\n",
+ " return sigmoid(ops.inner(inputs, W) + b)\n",
+ "\n",
+ "\n",
+ "# Build a toy dataset.\n",
+ "inputs = Tensor(np.array([[0.52, 1.12, 0.77],\n",
+ " [0.88, -1.08, 0.15],\n",
+ " [0.52, 0.06, -1.30],\n",
+ " [0.74, -2.49, 1.39]]), ms.float32)\n",
+ "targets = Tensor(np.array([True, True, False, True]))\n",
+ "\n",
+ "# Training loss is the negative log-likelihood of the training examples.\n",
+ "\n",
+ "\n",
+ "def loss(W, b):\n",
+ " preds = predict(W, b, inputs)\n",
+ " label_probs = preds * targets + (1 - preds) * (1 - targets)\n",
+ " return -ops.sum(ops.log(label_probs))\n",
+ "\n",
+ "\n",
+ "# Initialize random model coefficients\n",
+ "W = Tensor(np.random.rand(3,), ms.float32)\n",
+ "b = Tensor(np.random.rand(), ms.float32)\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "在`grad` 中使用 `grad_position`对指定的位置参数进行微分\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "W_grad [-0.5185027 1.5961987 -1.5178145]\n",
+ "W_grad [-0.5185027 1.5961987 -1.5178145]\n",
+ "b_grad -0.49954596\n",
+ "W_grad [-0.5185027 1.5961987 -1.5178145]\n",
+ "b_grad -0.49954596\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Differentiate `loss` with respect to the first positional argument:\n",
+ "W_grad = grad(loss, grad_position=0)(W, b)\n",
+ "print('W_grad', W_grad)\n",
+ "\n",
+ "# Since argnums=0 is the default, this does the same thing:\n",
+ "W_grad = grad(loss)(W, b)\n",
+ "print('W_grad', W_grad)\n",
+ "\n",
+ "# But we can choose different values too, and drop the keyword:\n",
+ "b_grad = grad(loss, 1)(W, b)\n",
+ "print('b_grad', b_grad)\n",
+ "\n",
+ "# Including tuple values\n",
+ "W_grad, b_grad = grad(loss, (0, 1))(W, b)\n",
+ "print('W_grad', W_grad)\n",
+ "print('b_grad', b_grad)\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "本质上来说,使用`grad_position`时,如果`f`是一个Python函数,那么表达式`grad(f, i)`就是在求偏微分$\\partial_i f$.\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## `value_and_grad`:同时获得函数值与梯度\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`value_and_grad`可以方便地同时计算函数值和梯度值:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "loss value 2.0792074\n",
+ "loss value 2.0792074\n"
+ ]
+ }
+ ],
+ "source": [
+ "from mindspore import value_and_grad\n",
+ "loss_value, Wb_grad = value_and_grad(loss, (0, 1))(W, b)\n",
+ "print('loss value', loss_value)\n",
+ "print('loss value', loss(W, b))\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 与数值计算结果比较\n",
+ "\n",
+ "自动微分可以很直接地与有限微分比较:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "b_grad_numerical -0.500679\n",
+ "b_grad_autodiff -0.49954596\n",
+ "W_dirderiv_numerical -1.7213821\n",
+ "W_dirderiv_autodiff -1.71724\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Set a step size for finite differences calculations\n",
+ "eps = 1e-4\n",
+ "\n",
+ "# Check b_grad with scalar finite differences\n",
+ "b_grad_numerical = (loss(W, b + eps / 2.) - loss(W, b - eps / 2.)) / eps\n",
+ "print('b_grad_numerical', b_grad_numerical)\n",
+ "print('b_grad_autodiff', grad(loss, 1)(W, b))\n",
+ "\n",
+ "# Check W_grad with finite differences in a random direction\n",
+ "# key, subkey = random.split(key)\n",
+ "vec = Tensor(np.random.normal(size=W.shape), mstype.float32)\n",
+ "unitvec = vec / ops.sqrt(ops.inner(vec, vec))\n",
+ "unitvec = unitvec.reshape(W.shape)\n",
+ "W_grad_numerical = (loss(W + eps / 2. * unitvec, b) -\n",
+ " loss(W - eps / 2. * unitvec, b)) / eps\n",
+ "print('W_dirderiv_numerical', W_grad_numerical)\n",
+ "print('W_dirderiv_autodiff', ops.inner(grad(loss)(W, b), unitvec))\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## `grad`+`grad`得到Hessian向量积\n",
+ "\n",
+ "使用高阶`grad`可以构造Hessian向量积。(后面我们会用前向模式和反向模式写一个更高效的实现)\n",
+ "\n",
+ "Hessian向量积可用来在[截断的牛顿共轭梯度法](https://en.wikipedia.org/wiki/Truncated_Newton_method)中最小化一个光滑的凸函数,或者用来判断神经网络训练目标的曲率性质。(如 [1](https://arxiv.org/abs/1406.2572), [2](https://arxiv.org/abs/1811.07062), [3](https://arxiv.org/abs/1706.04454), [4](https://arxiv.org/abs/1802.03451)).\n",
+ "\n",
+ "对于一个有着连续二阶导的标量函数(这种函数的Hessian矩阵是对称的)$f : \\mathbb{R}^n \\to \\mathbb{R}$,点$x \\in \\mathbb{R}^n$处的Hessian算子为$\\partial^2 f(x)$。一个Hessian向量积用来计算映射:\n",
+ "\n",
+ "$\\qquad v \\mapsto \\partial^2 f(x) \\cdot v$\n",
+ "\n",
+ "其中 $v \\in \\mathbb{R}^n$。\n",
+ "\n",
+    "这里的关键在于我们无需实例化整个Hessian矩阵:如果$n$很大(神经网络中可能达到百万甚至上亿的量级),完整的Hessian矩阵是没法存储的。\n",
+ "\n",
+ "幸运的是, `grad` 提供了一种高效计算Hessian向量积的方式。我们只需要有恒等式:\n",
+ "\n",
+ "$\\qquad \\partial^2 f (x) v = \\partial [x \\mapsto \\partial f(x) \\cdot v] = \\partial g(x)$,\n",
+ "\n",
+    "其中 $g(x) = \partial f(x) \cdot v$ 是一个新的标量函数,表示 $f$ 在 $x$ 处的梯度与向量 $v$ 的点积。这里我们只需要对一个以向量为输入、输出标量的函数求微分,而这正是 `grad` 高效的情形。\n",
+ "\n",
+ "用MindSpore代码,我们可以写出:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def hvp(f, x, v):\n",
+ " return grad(lambda x: ops.inner(grad(f)(x), v))(x)\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "这个例子表明我们可以自由地使用词法闭包,MindSpore都可以正确处理。在后面我们会看到Hessian矩阵是怎么被计算出来的,知晓了原理之后我们会同时运用前向模式和反向模式给出一个更高效的写法。\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 运用 `jacfwd` 和 `jacrev` 计算Jacobians 和 Hessians 矩阵\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "用户可以用 `jacfwd` 和 `jacrev`计算Jacobian矩阵:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "jacfwd result, with shape (4, 3)\n",
+ "[[ 0.05072299 0.10924952 0.07510904]\n",
+ " [ 0.21355031 -0.26208448 0.03640062]\n",
+ " [ 0.12973952 0.01496994 -0.3243488 ]\n",
+ " [ 0.18499702 -0.62249 0.3474944 ]]\n",
+ "jacrev result, with shape (4, 3)\n",
+ "[[ 0.05072299 0.10924952 0.07510904]\n",
+ " [ 0.21355031 -0.26208448 0.03640062]\n",
+ " [ 0.12973952 0.01496994 -0.3243488 ]\n",
+ " [ 0.18499702 -0.62249 0.3474944 ]]\n"
+ ]
+ }
+ ],
+ "source": [
+ "from mindspore import jacfwd, jacrev\n",
+ "\n",
+ "# Isolate the function from the weight matrix to the predictions\n",
+ "\n",
+ "\n",
+ "def f(W):\n",
+ " return predict(W, b, inputs)\n",
+ "\n",
+ "\n",
+ "J = jacfwd(f)(W)\n",
+ "print(\"jacfwd result, with shape\", J.shape)\n",
+ "print(J)\n",
+ "\n",
+ "J = jacrev(f)(W)\n",
+ "print(\"jacrev result, with shape\", J.shape)\n",
+ "print(J)\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "这两个函数得到的结果应该是一样的,二者只是实现方式不同:`jacfwd` 使用前向模式自动微分,在比较\"高\"的Jacobian矩阵上更高效;`jacrev` 使用反向模式,在\"宽\"的矩阵上更高效。对于接近方形的矩阵,`jacfwd` 的效果稍好于 `jacrev`。\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "关于前向模式和反向模式的更多信息,请继续阅读!\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "将二者组合起来,可以计算稠密(dense)的Hessian矩阵:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "hessian, with shape (4, 3, 3)\n",
+ "[[[-2.0597292e-02 -4.4363402e-02 -3.0499836e-02]\n",
+ " [-4.4363398e-02 -9.5551945e-02 -6.5691955e-02]\n",
+ " [-3.0499836e-02 -6.5691963e-02 -4.5163218e-02]]\n",
+ "\n",
+ " [[-3.2176636e-02 3.9489504e-02 -5.4846536e-03]\n",
+ " [ 3.9489508e-02 -4.8464395e-02 6.7311660e-03]\n",
+ " [-5.4846536e-03 6.7311660e-03 -9.3488418e-04]]\n",
+ "\n",
+ " [[-3.0198938e-03 -3.4844928e-04 7.5497343e-03]\n",
+ " [-3.4844928e-04 -4.0205687e-05 8.7112316e-04]\n",
+ " [ 7.5497343e-03 8.7112322e-04 -1.8874336e-02]]\n",
+ "\n",
+ " [[-5.4928247e-04 1.8482616e-03 -1.0317604e-03]\n",
+ " [ 1.8482613e-03 -6.2191500e-03 3.4717342e-03]\n",
+ " [-1.0317604e-03 3.4717345e-03 -1.9380364e-03]]]\n"
+ ]
+ }
+ ],
+ "source": [
+ "def hessian(f):\n",
+ " return jacfwd(jacrev(f))\n",
+ "\n",
+ "\n",
+ "H = hessian(f)(W)\n",
+ "print(\"hessian, with shape\", H.shape)\n",
+ "print(H)\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "这里的shape是合理的:$f : \\mathbb{R}^n \\to \\mathbb{R}^m$, 在点 $x \\in \\mathbb{R}^n$ 上,会有shape\n",
+ "\n",
+ "- $f(x) \\in \\mathbb{R}^m$, $f$ 在 $x$ 处的值,\n",
+ "- $\\partial f(x) \\in \\mathbb{R}^{m \\times n}$, $x$ 处的Jacobian矩阵,\n",
+ "- $\\partial^2 f(x) \\in \\mathbb{R}^{m \\times n \\times n}$, $x$ 处的Hessian矩阵\n",
+ "\n",
+    "`jacfwd(jacrev(f))`、`jacrev(jacfwd(f))` 或二者的任意组合都可以计算出Hessian矩阵,只是 forward-over-reverse 一般是效率最高的方式。这是因为内层的Jacobian计算往往是对宽Jacobian的函数求微分(比如损失函数 $f : \mathbb{R}^n \to \mathbb{R}$),而外层的Jacobian计算微分的是一个输入输出维数相同的函数(即 $\nabla f : \mathbb{R}^n \to \mathbb{R}^n$),这时前向模式速度更快。\n"
+ ]
+ },
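+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "下面给出一个最小示意(非原教程内容,沿用上文已定义的 `f`、`W`、`H` 以及已导入的 `jacfwd`、`jacrev`):按 reverse-over-forward 的组合再计算一次Hessian,与上面 forward-over-reverse 的结果应当一致。\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 示意:reverse-over-forward 组合同样可以得到Hessian矩阵(假设 f、W、H 沿用上文定义)\n",
+    "H2 = jacrev(jacfwd(f))(W)\n",
+    "print(np.allclose(H.asnumpy(), H2.asnumpy(), 1e-4, 1e-4))\n"
+   ]
+  },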
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 深入理解两个基本的自动微分函数\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Jacobian向量积 (JVPs, 前向模式自动微分)\n",
+ "\n",
+ "MindSpore对前向和反向的自动微分都提供了高效且泛用性强的实现。我们熟悉的 `grad` 是基于反向模式实现的,不过为了理解二者的区别,我们需要一点数学背景。\n",
+ "\n",
+ "### JVPs的数学背景\n",
+ "\n",
+    "从数学的角度看,给定一个函数 $f : \mathbb{R}^n \to \mathbb{R}^m$,$f$ 在输入点 $x \in \mathbb{R}^n$ 处的Jacobian矩阵记作 $\partial f(x)$,它是一个 $m \times n$ 的矩阵:\n",
+ "\n",
+ "$\\qquad \\partial f(x) \\in \\mathbb{R}^{m \\times n}$.\n",
+ "\n",
+    "我们可以将 $\partial f(x)$ 视为一个线性映射,它把 $f$ 的定义域在点 $x$ 处的切空间(其实就是 $\mathbb{R}^n$ 的一份拷贝)映射到 $f$ 的陪域在点 $f(x)$ 处的切空间($\mathbb{R}^m$ 的拷贝)。\n",
+ "\n",
+ "$\\qquad \\partial f(x) : \\mathbb{R}^n \\to \\mathbb{R}^m$.\n",
+ "\n",
+ "这个映射又被称作 $f$ 在 $x$ 的[前推映射](https://en.wikipedia.org/wiki/Pushforward_(differential))。Jacobian矩阵只是这个线性映射在标准情况下的矩阵形式。\n",
+ "\n",
+ "如果我们不拘泥于一个特定的点 $x$,那么函数 $\\partial f$ 可被视为先取一个输入点然后返回那个点上的Jacobian线性映射:\n",
+ "\n",
+ "$\\qquad \\partial f : \\mathbb{R}^n \\to \\mathbb{R}^n \\to \\mathbb{R}^m$.\n",
+ "\n",
+    "具体来说,把它反柯里化(uncurry)之后,给定输入 $x \in \mathbb{R}^n$ 和切向量 $v \in \mathbb{R}^n$,就会返回一个 $\mathbb{R}^m$ 中的输出切向量。我们把从 $(x, v)$ 到输出切向量的映射称为 _Jacobian向量积_,写作:\n",
+ "\n",
+ "$\\qquad (x, v) \\mapsto \\partial f(x) v$\n",
+ "\n",
+ "### MindSpore中的JVP\n",
+ "\n",
+ "回到Python代码上,MindSpore的 `jvp` 函数模拟了上述转换。 给定一个Python函数 $f$, MindSpore的 `jvp` 可以得到一个表达 $(x, v) \\mapsto (f(x), \\partial f(x) v)$ 的函数\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {
+ "scrolled": true
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[0.89045584 0.5856106 0.52238137 0.5020062 ] [ 0.01188576 0.00967572 -0.15435933 0.17893277]\n"
+ ]
+ }
+ ],
+ "source": [
+ "from mindspore import jvp\n",
+ "\n",
+ "# Isolate the function from the weight matrix to the predictions\n",
+ "\n",
+ "\n",
+ "def f(W):\n",
+ " return predict(W, b, inputs)\n",
+ "\n",
+ "\n",
+ "v = Tensor(np.random.normal(size=W.shape), mstype.float32)\n",
+ "# Push forward the vector `v` along `f` evaluated at `W`\n",
+ "y, u = jvp(f, (W), (v))\n",
+ "print(y, u)\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "按照[Haskell类型风格](https://wiki.haskell.org/Type_signature), 有:\n",
+ "\n",
+ "```haskell\n",
+ "jvp :: (a -> b) -> a -> T a -> (b, T b)\n",
+ "```\n",
+ "\n",
+    "在这里,我们用 `T a` 表示 `a` 的切空间的类型。简而言之,`jvp` 的参数包括一个 `a -> b` 类型的函数、一个 `a` 类型的值和一个 `T a` 类型的切向量,返回的是一个 `b` 类型的值和一个 `T b` 类型的切向量。\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "`jvp`转换后的函数的计算方式与原函数很相似:它把每个`a`类型的原值(primal)与`T a`类型的切线值配对,一起向前推送。对于原函数中用到的每个数值原语(primitive),转换后的函数都会执行该原语对应的\"JVP规则\":既在原值上求值,又在这些原值处施加该原语的JVP。\n",
+ "\n",
+ "这种计算策略对计算的复杂度有直接的影响:因为在计算JVP的过程中不用存储任何东西,所以空间开销和计算的深度完全无关。除此之外, `jvp` 转换过的函数FLOP开销约是原函数的3倍 (一份来自原函数的计算,比如 `sin(x)`; 一份来自线性化,如 `cos(x)`;还有一份来自于将线性化函数施加在向量上,如 `cos_x * v`)。 换句话说,对于固定的点 $x$,我们计算 $v \\mapsto \\partial f(x) \\cdot v$ 和计算 $f$的边际成本是相近的。\n",
+ "\n",
+    "这里的空间复杂度看起来很有吸引力,但既然如此,为什么我们在机器学习中并不经常见到前向模式呢?\n",
+ "\n",
+    "为了回答这个问题,首先假设要用JVP构建一个完整的Jacobian矩阵。如果我们对一个one-hot切向量使用JVP,结果正好是Jacobian矩阵中与该非零分量对应的一列。所以我们可以一次一列地构建出完整的Jacobian矩阵,而且每一列的开销和一次函数计算差不多。这就意味着对于\"高\"的Jacobian矩阵来说比较合算,但对于\"宽\"的就较为低效。\n",
+ "\n",
+ "如果在机器学习中做基于梯度的优化,你可能想要最小化损失函数,这个损失函数以 $\\mathbb{R}^n$ 为参数,返回一个标量值$\\mathbb{R}$。 这就意味着该函数的Jacobian矩阵会很宽了:$\\partial f(x) \\in \\mathbb{R}^{1 \\times n}$,一般我们会认为和梯度向量 $\\nabla f(x) \\in \\mathbb{R}^n$ 一样。一次一列地构建这个矩阵,而且每列的FLOP和原函数计算一次的开销差不多,这个开销当然是不小的。尤其是,对于训练神经网络来说,损失函数 $f$ 的 $n$ 可以达到上亿的量级,这就更暴露出前向模式的问题了。\n",
+ "\n",
+ "为了解决这种问题,就需要反向模式了。\n"
+ ]
+ },
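+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "下面用一个最小示意(非原教程内容,沿用上文已定义的 `f`、`W` 以及已导入的 `jvp`、`jacfwd`)演示上述\"一次一列\"的思路:对每个 one-hot 切向量调用一次 `jvp`,得到Jacobian矩阵对应的一列。\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# 示意:用 one-hot 切向量和 jvp 逐列构建 Jacobian(假设 f、W 沿用上文定义)\n",
+    "def jacobian_by_columns(f, x):\n",
+    "    n = x.size\n",
+    "    cols = []\n",
+    "    for i in range(n):\n",
+    "        # 第 i 个 one-hot 切向量,JVP 的结果即 Jacobian 的第 i 列\n",
+    "        v = Tensor(np.eye(n, dtype=np.float32)[i].reshape(x.shape))\n",
+    "        cols.append(jvp(f, (x), (v))[1])\n",
+    "    return ops.stack(cols, axis=-1)\n",
+    "\n",
+    "\n",
+    "print(np.allclose(jacobian_by_columns(f, W).asnumpy(), jacfwd(f)(W).asnumpy(), 1e-4, 1e-4))\n"
+   ]
+  },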
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### 向量Jacobian 积 (VJP, 反向模式自动微分)\n",
+ "\n",
+ "和前向模式的一次一列的方式不同,反向模式的构造方式是一次一行。\n",
+ "\n",
+ "### VJPs 的数学背景\n",
+ "\n",
+ "首先考虑有 $f : \\mathbb{R}^n \\to \\mathbb{R}^m$。 其VJP表达为:\n",
+ "\n",
+ "$\\qquad (x, v) \\mapsto v \\partial f(x)$,\n",
+ "\n",
+    "其中 $v$ 属于 $f$ 在 $x$ 处的余切空间(同构于 $\mathbb{R}^m$)。严谨来说,$v$ 是线性映射 $v : \mathbb{R}^m \to \mathbb{R}$,$v \partial f(x)$ 指的是复合映射 $v \circ \partial f(x)$;由于 $\partial f(x) : \mathbb{R}^n \to \mathbb{R}^m$,这个复合是良定义的。不过通常 $v$ 都可以视为 $\mathbb{R}^m$ 中的向量,这两种写法基本可以互换。\n",
+ "\n",
+ "有了这些说明后,我们把VJP的线性部分视为JVP线性部分的转置(或伴随、共轭):\n",
+ "\n",
+ "$\\qquad (x, v) \\mapsto \\partial f(x)^\\mathsf{T} v$.\n",
+ "\n",
+ "对点 $x$,有:\n",
+ "\n",
+ "$\\qquad \\partial f(x)^\\mathsf{T} : \\mathbb{R}^m \\to \\mathbb{R}^n$.\n",
+ "\n",
+    "这个余切空间之间的映射通常称为 $f$ 在 $x$ 处的[拉回](https://en.wikipedia.org/wiki/Pullback_(differential_geometry))。理解的关键在于:拉回把形如 $f$ 输出的对象映射为形如 $f$ 输入的对象,就像线性函数的转置一样。\n",
+ "\n",
+ "### MindSpore中使用VJP\n",
+ "\n",
+ "MindSpore `vjp` 以一个python函数 $f$ 为输入,返回表示 VJP $(x, v) \\mapsto (f(x), v^\\mathsf{T} \\partial f(x))$ 的函数。\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 66,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ 0.6064372 -1.1690241 0.32237193]\n"
+ ]
+ }
+ ],
+ "source": [
+ "from mindspore import vjp\n",
+ "\n",
+ "# Isolate the function from the weight matrix to the predictions\n",
+ "\n",
+ "\n",
+ "def f(W):\n",
+ " return predict(W, b, inputs)\n",
+ "\n",
+ "\n",
+ "y, vjp_fun = vjp(f, W)\n",
+ "\n",
+ "u = Tensor(np.random.normal(size=y.shape), mstype.float32)\n",
+ "\n",
+ "# Pull back the covector `u` along `f` evaluated at `W`\n",
+ "v = vjp_fun(u)\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "仿照 [Haskell-like type signatures](https://wiki.haskell.org/Type_signature), 有\n",
+ "\n",
+ "```haskell\n",
+ "vjp :: (a -> b) -> a -> (b, CT b -> CT a)\n",
+ "```\n",
+ "\n",
+ "其中,我们用`CT a`来表示`a`的余切空间的类型。换句话说,`vjp`将一个`a -> b`类型的函数和一个`a`类型的点作为参数,并返回一个由`b`类型的值和`CT b -> CT a`类型的线性映射组成的对。\n",
+ "\n",
+    "VJP的一个优良性质在于它是按行构建Jacobian矩阵的:计算 $(x, v) \mapsto (f(x), v^\mathsf{T} \partial f(x))$ 的FLOP仅为计算 $f$ 的三倍左右,而要得到 $f : \mathbb{R}^n \to \mathbb{R}$ 的梯度,我们只需要一次VJP。这就是为什么 `grad` 在大的神经网络中做梯度优化依然高效。\n",
+ "\n",
+ "不过还有一点需要考虑一下: 尽管 FLOP开销不高,VJP的空间复杂度是随计算深度上升而上升的。而且实现上通常比前向模式复杂。\n",
+ "\n",
+ "反向模式的更多说明请参阅 [this tutorial video from the Deep Learning Summer School in 2017](http://videolectures.net/deeplearning2017_johnson_automatic_differentiation/).\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## VJP计算梯度向量\n",
+ "\n",
+ "可以用VJP得到梯度向量:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[[6. 6.]\n",
+ " [6. 6.]]\n"
+ ]
+ }
+ ],
+ "source": [
+ "from mindspore import vjp\n",
+ "\n",
+ "context.set_context(mode=ms.PYNATIVE_MODE)\n",
+ "\n",
+ "\n",
+ "def vgrad(f, x):\n",
+ " y, vjp_fn = vjp(f, x)\n",
+ " return vjp_fn(ops.ones(y.shape))[0]\n",
+ "\n",
+ "\n",
+ "print(vgrad(lambda x: 3*x**2, ops.ones((2, 2))))\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## 用前向和反向模式得到Hessian向量积\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "仅用反向模式得到Hessian向量积的实现:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 146,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def hvp(f, x, v):\n",
+ " return grad(lambda x: ops.inner(grad(f)(x), v))(x)\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "通过组合使用前反向的方法我们可以得到更高效的实现。\n",
+ "\n",
+ "设有待微分函数 $f : \\mathbb{R}^n \\to \\mathbb{R}$ , 在点 $x \\in \\mathbb{R}^n$ 线性化函数,并有向量 $v \\in \\mathbb{R}^n$。 Hessian向量积函数为:\n",
+ "\n",
+ "$(x, v) \\mapsto \\partial^2 f(x) v$\n",
+ "\n",
+ "构造helper function $g : \\mathbb{R}^n \\to \\mathbb{R}^n$,定义为 $f$ 的导数(或梯度), 即 $g(x) = \\partial f(x)$。使用一次JVP,便得到:\n",
+ "\n",
+ "$(x, v) \\mapsto \\partial g(x) v = \\partial^2 f(x) v$。\n",
+ "\n",
+ "用代码写作:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from mindspore import jvp, grad\n",
+ "\n",
+ "# forward-over-reverse\n",
+ "\n",
+ "\n",
+ "def hvp(f, primals, tangents):\n",
+ " return jvp(grad(f), primals, tangents)[1]\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "在这里我们不需要 `ops.inner`,该 `hvp` 函数对任何shape的数组都成立。\n",
+ "\n",
+ "以下是该函数的一个样例:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "True\n"
+ ]
+ }
+ ],
+ "source": [
+ "def f(X):\n",
+ " return ops.sum(ops.tanh(X)**2)\n",
+ "\n",
+ "\n",
+ "X = Tensor(np.random.normal(size=(30, 40)), mstype.float32)\n",
+ "V = Tensor(np.random.normal(size=(30, 40)), mstype.float32)\n",
+ "\n",
+ "ans1 = hvp(f, (X), (V))\n",
+ "ans2 = ms.numpy.tensordot(hessian(f)(X), V, 2)\n",
+ "\n",
+ "print(np.allclose(ans1.numpy(), ans2.numpy(), 1e-4, 1e-4))\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "你也可以考虑写一种先前向后反向的方式:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# reverse-over-forward\n",
+ "def hvp_revfwd(f, primals, tangents):\n",
+ " def g(primals):\n",
+ " return jvp(f, primals, tangents)[1]\n",
+ " return grad(g)(primals)\n"
+ ]
+ },
+ {
+ "attachments": {},
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "不过这种写法通常没那么高效:前向模式的开销比反向模式低,而这里外层微分算子要微分的计算比内层更大,所以把前向模式放在外层(forward-over-reverse)通常效果更好:\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Forward over reverse\n",
+ "297 ms ± 9.5 ms per loop (mean ± std. dev. of 3 runs, 10 loops each)\n",
+ "Reverse over forward\n",
+ "2.48 ms ± 257 µs per loop (mean ± std. dev. of 3 runs, 10 loops each)\n",
+ "Reverse over reverse\n",
+ "4.44 ms ± 51.9 µs per loop (mean ± std. dev. of 3 runs, 10 loops each)\n",
+ "Naive full Hessian materialization\n",
+ "1.23 s ± 13.6 ms per loop (mean ± std. dev. of 3 runs, 10 loops each)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# reverse-over-reverse, only works for single arguments\n",
+ "context.set_context(mode=ms.PYNATIVE_MODE)\n",
+ "\n",
+ "\n",
+ "def hvp_revrev(f, primals, tangents):\n",
+ " x = primals\n",
+ " v = tangents\n",
+ " return grad(lambda x: ops.inner(grad(f)(x), v))(x)\n",
+ "\n",
+ "\n",
+ "print(\"Forward over reverse\")\n",
+ "%timeit - n10 - r3 hvp(f, (X), (V))\n",
+ "print(\"Reverse over forward\")\n",
+ "%timeit - n10 - r3 hvp_revfwd(f, (X), (V))\n",
+ "print(\"Reverse over reverse\")\n",
+ "%timeit - n10 - r3 hvp_revrev(f, (X), (V))\n",
+ "print(\"Naive full Hessian materialization\")\n",
+ "%timeit - n10 - r3 ms.numpy.tensordot(hessian(f)(X), V, 2)\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.13"
+ },
+ "vscode": {
+ "interpreter": {
+ "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/MindFlow/mindflow/cfd/space_solver/reconstructor/__init__.py b/MindFlow/mindflow/cfd/space_solver/reconstructor/__init__.py
index 51668856bb98d6e511f03fd1ba96ece39a9b0877..ac358fb11e04aa96f5e0eaede1339b0c18609d72 100644
--- a/MindFlow/mindflow/cfd/space_solver/reconstructor/__init__.py
+++ b/MindFlow/mindflow/cfd/space_solver/reconstructor/__init__.py
@@ -13,10 +13,14 @@
# limitations under the License.
# ==============================================================================
"""init of reconstructor."""
+from .weno3 import WENO3
from .weno5 import WENO5
+from .weno7 import WENO7
_reconstructor_dict = {
+ 'WENO3': WENO3,
'WENO5': WENO5,
+ 'WENO7': WENO7,
}
diff --git a/MindFlow/mindflow/cfd/space_solver/reconstructor/weno3.py b/MindFlow/mindflow/cfd/space_solver/reconstructor/weno3.py
new file mode 100644
index 0000000000000000000000000000000000000000..0448a9fcf3a409e0ba7fe2393b7a2a0f82afdd92
--- /dev/null
+++ b/MindFlow/mindflow/cfd/space_solver/reconstructor/weno3.py
@@ -0,0 +1,121 @@
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""3rd order Weighted Essentially Non-Oscillatory (WENO) scheme"""
+from mindspore import jit_class
+
+from .base import Reconstructor
+
+
+@jit_class
+class WENO3(Reconstructor):
+ r"""
+    3rd order Weighted Essentially Non-Oscillatory (WENO) scheme
+
+    The basic idea of the WENO scheme is to replace the ENO scheme, which uses only the smoothest interpolation
+    stencil, with a weighted average of the numerical fluxes at the mesh interfaces obtained from every candidate
+    stencil. Each candidate stencil is assigned a weight that determines its contribution to the numerical flux at
+    the final grid interface.
+
+    For more details, please refer to the paper: `G.S. Jiang, C.W. Shu, Efficient implementation of weighted ENO
+ schemes, J. Comput. Phys. 126 (1996) 202-228.`
+
+ Args:
+ mesh_info (MeshInfo): The information container of the computing mesh.
+
+ Raises:
+ ValueError: If `mesh_info.pad` is less than 2.
+
+ Supported Platforms:
+ ``GPU``
+
+ """
+
+ def __init__(self, mesh_info):
+ super(WENO3, self).__init__(mesh_info)
+ self._coe1 = [
+ [1.0 / 3.0, 2.0 / 3.0],
+ [2.0 / 3.0, 1.0 / 3.0],
+ ]
+ self._coe2 = [
+ [[-0.5, 1.5], [0.5, 0.5]],
+ [[0.5, 0.5], [1.5, -0.5]],
+ ]
+
+ if self.pad < 2:
+            raise ValueError('pad should not be smaller than 2 for WENO3 reconstructor')
+ self.eps = 1e-5
+
+ def _reconstruct_on_face(self, var, axis, j):
+ """
+        Calculate the reconstructed variables on faces.
+
+ Inputs:
+ - **var** (Tensor) - Input tensor.
+ - **axis** (int) - 0, 1, 2 indicate x-dimension, y-dimension and z-dimension respectively.
+ - **j** (int) - reconstruct direction, 0, 1 indicate reconstruct from left and right respectively.
+
+ Outputs:
+ Tensor, output tensor.
+ """
+ var_0, var_1, var_2 = self._get_var(var, axis, j)
+
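+        # Smoothness indicators (beta) of the two candidate stencils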
+ beta_0 = (var_1 - var_0) ** 2
+ beta_1 = (var_2 - var_1) ** 2
+
+ one_beta_0_sq = 1.0 / ((self.eps + beta_0) * (self.eps + beta_0))
+ one_beta_1_sq = 1.0 / ((self.eps + beta_1) * (self.eps + beta_1))
+
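+        # Nonlinear weights: alpha_k = d_k / (eps + beta_k)^2, normalized to omega_k below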
+ alpha_0 = self._coe1[j][0] * one_beta_0_sq
+ alpha_1 = self._coe1[j][1] * one_beta_1_sq
+
+ one_alpha = 1.0 / (alpha_0 + alpha_1)
+
+ omega_0 = alpha_0 * one_alpha
+ omega_1 = alpha_1 * one_alpha
+
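+        # Second-order candidate reconstructions on each two-cell stencil, blended by the nonlinear weights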
+ p_0 = self._coe2[j][0][0] * var_0 + self._coe2[j][0][1] * var_1
+ p_1 = self._coe2[j][1][0] * var_1 + self._coe2[j][1][1] * var_2
+
+ var_on_face = omega_0 * p_0 + omega_1 * p_1
+
+ output_size = [
+ 3,
+ ] + self.mesh_info.number_of_cells
+ output_size[axis + 1] += 1
+
+ return self._slice(var_on_face, output_size)
+
+ def _get_var(self, inputs, axis, j):
+ """get variables for reconstructor."""
+ var_0 = None
+ var_1 = None
+ var_2 = None
+
+ if axis == 0:
+ var_0 = inputs[:, self.pad - 2 + j : self.pad - 1 + j + self.mesh_info.number_of_cells[0], :, :]
+ var_1 = inputs[:, self.pad - 1 + j : self.pad + j + self.mesh_info.number_of_cells[0], :, :]
+ var_2 = inputs[:, self.pad + j : self.pad + j + 1 + self.mesh_info.number_of_cells[0], :, :]
+
+ if axis == 1:
+ var_0 = inputs[:, :, self.pad - 2 + j : self.pad - 1 + j + self.mesh_info.number_of_cells[1], :]
+ var_1 = inputs[:, :, self.pad - 1 + j : self.pad + j + self.mesh_info.number_of_cells[1], :]
+ var_2 = inputs[:, :, self.pad + j : self.pad + j + 1 + self.mesh_info.number_of_cells[1], :]
+
+ if axis == 2:
+ var_0 = inputs[:, :, :, self.pad - 2 + j : self.pad - 1 + j + self.mesh_info.number_of_cells[2]]
+ var_1 = inputs[:, :, :, self.pad - 1 + j : self.pad + j + self.mesh_info.number_of_cells[2]]
+ var_2 = inputs[:, :, :, self.pad + j : self.pad + j + 1 + self.mesh_info.number_of_cells[2]]
+
+ return var_0, var_1, var_2
diff --git a/MindFlow/mindflow/cfd/space_solver/reconstructor/weno7.py b/MindFlow/mindflow/cfd/space_solver/reconstructor/weno7.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ba1fb897603c4d8849c8643fa47127266c761ff
--- /dev/null
+++ b/MindFlow/mindflow/cfd/space_solver/reconstructor/weno7.py
@@ -0,0 +1,199 @@
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""7th order Weighted Essentially Non-Oscillatory (WENO) scheme"""
+from mindspore import jit_class
+
+from .base import Reconstructor
+
+
+@jit_class
+class WENO7(Reconstructor):
+ r"""
+ 7th order Weighted Essentially Non-Oscillatory (WENO) scheme
+
+    The basic idea of the WENO scheme is to replace the ENO scheme, which uses only the smoothest interpolation
+    stencil, with a weighted average of the numerical fluxes at the mesh interfaces obtained from every candidate
+    stencil. Each candidate stencil is assigned a weight that determines its contribution to the numerical flux at
+    the final grid interface.
+
+    For more details, please refer to the paper: `D.S. Balsara, C.W. Shu, J. Comput. Phys. 160 (2) (2000) 405–452,
+ https://doi.org/10.1006/jcph.2000.6443.`
+
+ Args:
+ mesh_info (MeshInfo): The information container of the computing mesh.
+
+ Raises:
+ ValueError: If `mesh_info.pad` is less than 4.
+
+ Supported Platforms:
+ ``GPU``
+
+ """
+
+ def __init__(self, mesh_info):
+ super().__init__(mesh_info)
+ self._coe1 = [
+ [1.0 / 35.0, 12.0 / 35.0, 18.0 / 35.0, 4.0 / 35.0],
+ [4.0 / 35.0, 18.0 / 35.0, 12.0 / 35.0, 1.0 / 35.0],
+ ]
+ self._coe2 = [
+ [
+ [-1.0 / 4.0, 13.0 / 12.0, -23.0 / 12.0, 25.0 / 12.0],
+ [1.0 / 12.0, -5.0 / 12.0, 13.0 / 12.0, 1.0 / 4.0],
+ [-1.0 / 12.0, 7.0 / 12.0, 7.0 / 12.0, -1.0 / 12.0],
+ [1.0 / 4.0, 13.0 / 12.0, -5.0 / 12.0, 1.0 / 12.0],
+ ],
+ [
+ [1.0 / 12.0, -5.0 / 12.0, 13.0 / 12.0, 1.0 / 4.0],
+ [-1.0 / 12.0, 7.0 / 12.0, 7.0 / 12.0, -1.0 / 12.0],
+ [1.0 / 4.0, 13.0 / 12.0, -5.0 / 12.0, 1.0 / 12.0],
+ [25.0 / 12.0, -23.0 / 12.0, 13.0 / 12.0, -1.0 / 4.0],
+ ],
+ ]
+ if self.pad < 4:
+            raise ValueError('pad should not be smaller than 4 for WENO7 reconstructor')
+ self.eps = 1e-5
+
+ def _reconstruct_on_face(self, var, axis, j):
+ """
+        Calculate the reconstructed variables on faces.
+
+ Inputs:
+ - **var** (Tensor) - Input tensor.
+ - **axis** (int) - 0, 1, 2 indicate x-dimension, y-dimension and z-dimension respectively.
+ - **j** (int) - reconstruct direction, 0, 1 indicate reconstruct from left and right respectively.
+
+ Outputs:
+ Tensor, output tensor.
+ """
+ var_0, var_1, var_2, var_3, var_4, var_5, var_6 = self._get_var(var, axis, j)
+
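+        # Smoothness indicators (beta) of the four candidate stencils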
+ beta_0 = (
+ var_0 * (547 * var_0 - 3882 * var_1 + 4642 * var_2 - 1854 * var_3)
+ + var_1 * (7043 * var_1 - 17246 * var_2 + 7042 * var_3)
+ + var_2 * (11003 * var_2 - 9402 * var_3)
+ + var_3 * (2107 * var_3)
+ )
+
+ beta_1 = (
+ var_1 * (267 * var_1 - 1642 * var_2 + 1602 * var_3 - 494 * var_4)
+ + var_2 * (2843 * var_2 - 5966 * var_3 + 1922 * var_4)
+ + var_3 * (3443 * var_3 - 2522 * var_4)
+ + var_4 * (547 * var_4)
+ )
+
+ beta_2 = (
+ var_2 * (547 * var_2 - 2522 * var_3 + 1922 * var_4 - 494 * var_5)
+ + var_3 * (3443 * var_3 - 5966 * var_4 + 1602 * var_5)
+ + var_4 * (2843 * var_4 - 1642 * var_5)
+ + var_5 * (267 * var_5)
+ )
+
+ beta_3 = (
+ var_3 * (2107 * var_3 - 9402 * var_4 + 7042 * var_5 - 1854 * var_6)
+ + var_4 * (11003 * var_4 - 17246 * var_5 + 4642 * var_6)
+ + var_5 * (7043 * var_5 - 3882 * var_6)
+ + var_6 * (547 * var_6)
+ )
+
+ one_beta_0_sq = 1.0 / (self.eps + beta_0) ** 2
+ one_beta_1_sq = 1.0 / (self.eps + beta_1) ** 2
+ one_beta_2_sq = 1.0 / (self.eps + beta_2) ** 2
+ one_beta_3_sq = 1.0 / (self.eps + beta_3) ** 2
+
+ alpha_0 = self._coe1[j][0] * one_beta_0_sq
+ alpha_1 = self._coe1[j][1] * one_beta_1_sq
+ alpha_2 = self._coe1[j][2] * one_beta_2_sq
+ alpha_3 = self._coe1[j][3] * one_beta_3_sq
+
+ one_alpha = 1.0 / (alpha_0 + alpha_1 + alpha_2 + alpha_3)
+
+ omega_0 = alpha_0 * one_alpha
+ omega_1 = alpha_1 * one_alpha
+ omega_2 = alpha_2 * one_alpha
+ omega_3 = alpha_3 * one_alpha
+
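+        # Candidate reconstructions on each four-cell stencil, blended by the nonlinear weights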
+ p_0 = (
+ self._coe2[j][0][0] * var_0
+ + self._coe2[j][0][1] * var_1
+ + self._coe2[j][0][2] * var_2
+ + self._coe2[j][0][3] * var_3
+ )
+ p_1 = (
+ self._coe2[j][1][0] * var_1
+ + self._coe2[j][1][1] * var_2
+ + self._coe2[j][1][2] * var_3
+ + self._coe2[j][1][3] * var_4
+ )
+ p_2 = (
+ self._coe2[j][2][0] * var_2
+ + self._coe2[j][2][1] * var_3
+ + self._coe2[j][2][2] * var_4
+ + self._coe2[j][2][3] * var_5
+ )
+ p_3 = (
+ self._coe2[j][3][0] * var_3
+ + self._coe2[j][3][1] * var_4
+ + self._coe2[j][3][2] * var_5
+ + self._coe2[j][3][3] * var_6
+ )
+
+ var_on_face = omega_0 * p_0 + omega_1 * p_1 + omega_2 * p_2 + omega_3 * p_3
+
+ output_size = [
+ 7,
+ ] + self.mesh_info.number_of_cells
+ output_size[axis + 1] += 1
+
+ return self._slice(var_on_face, output_size)
+
+ def _get_var(self, inputs, axis, j):
+ """get variables for reconstructor."""
+ var_0 = None
+ var_1 = None
+ var_2 = None
+ var_3 = None
+ var_4 = None
+ var_5 = None
+ var_6 = None
+
+ if axis == 0:
+ var_0 = inputs[:, self.pad - 4 + j : self.pad - 3 + j + self.mesh_info.number_of_cells[0], :, :]
+ var_1 = inputs[:, self.pad - 3 + j : self.pad - 2 + j + self.mesh_info.number_of_cells[0], :, :]
+ var_2 = inputs[:, self.pad - 2 + j : self.pad - 1 + j + self.mesh_info.number_of_cells[0], :, :]
+ var_3 = inputs[:, self.pad - 1 + j : self.pad + j + self.mesh_info.number_of_cells[0], :, :]
+ var_4 = inputs[:, self.pad + j : self.pad + 1 + j + self.mesh_info.number_of_cells[0], :, :]
+ var_5 = inputs[:, self.pad + j + 1 : self.pad + j + 2 + self.mesh_info.number_of_cells[0], :, :]
+ var_6 = inputs[:, self.pad + j + 2 : self.pad + j + 3 + self.mesh_info.number_of_cells[0], :, :]
+
+ if axis == 1:
+ var_0 = inputs[:, :, self.pad - 4 + j : self.pad - 3 + j + self.mesh_info.number_of_cells[1], :]
+ var_1 = inputs[:, :, self.pad - 3 + j : self.pad - 2 + j + self.mesh_info.number_of_cells[1], :]
+ var_2 = inputs[:, :, self.pad - 2 + j : self.pad - 1 + j + self.mesh_info.number_of_cells[1], :]
+ var_3 = inputs[:, :, self.pad - 1 + j : self.pad + j + self.mesh_info.number_of_cells[1], :]
+ var_4 = inputs[:, :, self.pad + j : self.pad + 1 + j + self.mesh_info.number_of_cells[1], :]
+ var_5 = inputs[:, :, self.pad + j + 1 : self.pad + j + 2 + self.mesh_info.number_of_cells[1], :]
+ var_6 = inputs[:, :, self.pad + j + 2 : self.pad + j + 3 + self.mesh_info.number_of_cells[1], :]
+
+ if axis == 2:
+            var_0 = inputs[:, :, :, self.pad - 4 + j : self.pad - 3 + j + self.mesh_info.number_of_cells[2]]
+            var_1 = inputs[:, :, :, self.pad - 3 + j : self.pad - 2 + j + self.mesh_info.number_of_cells[2]]
+            var_2 = inputs[:, :, :, self.pad - 2 + j : self.pad - 1 + j + self.mesh_info.number_of_cells[2]]
+            var_3 = inputs[:, :, :, self.pad - 1 + j : self.pad + j + self.mesh_info.number_of_cells[2]]
+            var_4 = inputs[:, :, :, self.pad + j : self.pad + 1 + j + self.mesh_info.number_of_cells[2]]
+            var_5 = inputs[:, :, :, self.pad + j + 1 : self.pad + j + 2 + self.mesh_info.number_of_cells[2]]
+            var_6 = inputs[:, :, :, self.pad + j + 2 : self.pad + j + 3 + self.mesh_info.number_of_cells[2]]
+
+ return [var_0, var_1, var_2, var_3, var_4, var_5, var_6]
diff --git a/MindFlow/mindflow/cfd/space_solver/riemann_computer/__init__.py b/MindFlow/mindflow/cfd/space_solver/riemann_computer/__init__.py
index c55b706e9acb5cb666113452ea08ba77a44d23a6..a8924f33692a97aabeacd0cee3d2d24d7a131634 100644
--- a/MindFlow/mindflow/cfd/space_solver/riemann_computer/__init__.py
+++ b/MindFlow/mindflow/cfd/space_solver/riemann_computer/__init__.py
@@ -13,13 +13,12 @@
# limitations under the License.
# ==============================================================================
"""init of riemann computer."""
+from .hllc import HLLC
+from .roe import Roe
from .rusanov import Rusanov
from .rusanov_net import RusanovNet
-_riemann_dict = {
- 'Rusanov': Rusanov,
- 'RusanovNet': RusanovNet
-}
+_riemann_dict = {'Rusanov': Rusanov, 'RusanovNet': RusanovNet, 'HLLC': HLLC, 'Roe': Roe}
def define_riemann_computer(name):
diff --git a/MindFlow/mindflow/cfd/space_solver/riemann_computer/hllc.py b/MindFlow/mindflow/cfd/space_solver/riemann_computer/hllc.py
new file mode 100644
index 0000000000000000000000000000000000000000..53573c4f1cb65fd87fffd920f2bb7cbfda537713
--- /dev/null
+++ b/MindFlow/mindflow/cfd/space_solver/riemann_computer/hllc.py
@@ -0,0 +1,157 @@
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""HLLC (Harten-Lax-van Leer-Contact) Riemann Solver"""
+from mindspore import jit_class
+from mindspore import numpy as mnp
+
+from ...utils import cal_flux, cal_pri_var
+from .base import RiemannComputer
+
+
+@jit_class
+class HLLC(RiemannComputer):
+ r"""
+ HLLC (Harten-Lax-van Leer-Contact) Riemann Solver based on Toro et al. 2009
+
+ Args:
+ material (Material): The information container of the fluid material.
+
+ Supported Platforms:
+ ``GPU``
+
+ """
+
+ def __init__(self, material, net_dict=None):
+ self.minor = [
+ [2, 3],
+ [3, 1],
+ [1, 2],
+ ]
+ super().__init__(material)
+
+ def compute_riemann_flux(self, con_var_left, con_var_right, axis):
+ """
+ Compute Riemann flux on face.
+
+ Inputs:
+ - **con_var_left** (Tensor) - Conservative variables on left side face.
+ - **con_var_right** (Tensor) - Conservative variables on right side face.
+ - **axis** (int) - 0, 1, 2 indicate x-dimension, y-dimension and z-dimension respectively.
+
+ Outputs:
+ Tensor, calculated riemann flux.
+ """
+ pri_var_left = cal_pri_var(con_var_left, self.material)
+ pri_var_right = cal_pri_var(con_var_right, self.material)
+
+ flux_left = cal_flux(con_var_left, pri_var_left, axis)
+ flux_right = cal_flux(con_var_right, pri_var_right, axis)
+
+ sound_speed_left = self.material.sound_speed(pri_var_left)
+ sound_speed_right = self.material.sound_speed(pri_var_right)
+
+ # Step 1: pressure estimate
+ rho_bar = 0.5 * (pri_var_left[0] + pri_var_right[0])
+ sound_speed_mean = 0.5 * (sound_speed_left + sound_speed_right)
+ pressure_pvrs = (
+ 0.5 * (pri_var_left[4] + pri_var_right[4])
+ - 0.5 * (pri_var_left[axis + 1] - pri_var_right[axis + 1]) * rho_bar * sound_speed_mean
+ )
+ pressure_star = mnp.maximum(0.0, pressure_pvrs)
+
+ # Step 2.1: left and right wave speed estimate
+ gamma_ = (self.material.gamma + 1) * 0.5 / self.material.gamma
+ q_left = 1.0 * (pressure_star <= pri_var_left[4]) + mnp.sqrt(
+ 1 + gamma_ * (pressure_star / pri_var_left[4] - 1)
+ ) * (pressure_star > pri_var_left[4])
+ q_right = 1.0 * (pressure_star <= pri_var_right[4]) + mnp.sqrt(
+ 1 + gamma_ * (pressure_star / pri_var_right[4] - 1)
+ ) * (pressure_star > pri_var_right[4])
+ wave_speed_left = pri_var_left[axis + 1] - sound_speed_left * q_left
+ wave_speed_right = pri_var_right[axis + 1] + sound_speed_right * q_right
+ wave_speed_left = mnp.minimum(wave_speed_left, 0.0)
+ wave_speed_right = mnp.maximum(wave_speed_right, 0.0)
+
+ # Step 2.2: wave speed estimate
+ delta_u_left = wave_speed_left - pri_var_left[axis + 1]
+ delta_u_right = wave_speed_right - pri_var_right[axis + 1]
+ delta_rho_su = pri_var_left[0] * delta_u_left - pri_var_right[0] * delta_u_right
+ wave_speed_star = (
+ 1.0
+ / delta_rho_su
+ * (
+ pri_var_right[4]
+ - pri_var_left[4]
+ + pri_var_left[0] * pri_var_left[axis + 1] * delta_u_left
+ - pri_var_right[0] * pri_var_right[axis + 1] * delta_u_right
+ )
+ )
+
+ # Step 3: Compute the HLLC flux
+
+ # Compute pre-factors for left and right states
+ pre_factor_left = (
+ (wave_speed_left - pri_var_left[axis + 1]) / (wave_speed_left - wave_speed_star) * pri_var_left[0]
+ )
+ pre_factor_right = (
+ (wave_speed_right - pri_var_right[axis + 1]) / (wave_speed_right - wave_speed_star) * pri_var_right[0]
+ )
+
+ # Compute the star state for left and right states
+ u_star_left = [
+ pre_factor_left,
+ pre_factor_left,
+ pre_factor_left,
+ pre_factor_left,
+ pre_factor_left
+ * (
+ con_var_left[4] / con_var_left[0]
+ + (wave_speed_star - pri_var_left[axis + 1])
+ * (wave_speed_star + pri_var_left[4] / pri_var_left[0] / (wave_speed_left - pri_var_left[axis + 1]))
+ ),
+ ]
+ u_star_left[axis + 1] *= wave_speed_star
+ u_star_left[self.minor[axis][0]] *= pri_var_left[self.minor[axis][0]]
+ u_star_left[self.minor[axis][1]] *= pri_var_left[self.minor[axis][1]]
+ u_star_left = mnp.stack(u_star_left)
+
+ u_star_right = [
+ pre_factor_right,
+ pre_factor_right,
+ pre_factor_right,
+ pre_factor_right,
+ pre_factor_right
+ * (
+ con_var_right[4] / con_var_right[0]
+ + (wave_speed_star - pri_var_right[axis + 1])
+ * (wave_speed_star + pri_var_right[4] / pri_var_right[0] / (wave_speed_right - pri_var_right[axis + 1]))
+ ),
+ ]
+ u_star_right[axis + 1] *= wave_speed_star
+ u_star_right[self.minor[axis][0]] *= pri_var_right[self.minor[axis][0]]
+ u_star_right[self.minor[axis][1]] *= pri_var_right[self.minor[axis][1]]
+ u_star_right = mnp.stack(u_star_right)
+
+ # Compute the flux at the star state for left and right states
+ flux_star_left = flux_left + wave_speed_left * (u_star_left - con_var_left)
+ flux_star_right = flux_right + wave_speed_right * (u_star_right - con_var_right)
+
+ # Compute the final flux
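+        # Upwind selection: sign(S*) > 0 picks the left star-state flux, sign(S*) < 0 picks the right one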
+ fluxes = (
+ 0.5 * (1 + mnp.sign(wave_speed_star)) * flux_star_left
+ + 0.5 * (1 - mnp.sign(wave_speed_star)) * flux_star_right
+ )
+
+ return fluxes
diff --git a/MindFlow/mindflow/cfd/space_solver/riemann_computer/roe.py b/MindFlow/mindflow/cfd/space_solver/riemann_computer/roe.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fc165691b925322870c25b2ff92d9e70448b055
--- /dev/null
+++ b/MindFlow/mindflow/cfd/space_solver/riemann_computer/roe.py
@@ -0,0 +1,377 @@
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Roe Riemann Solver"""
+from mindspore import jit_class, ops
+from mindspore import numpy as mnp
+
+from ...utils import cal_flux, cal_pri_var
+from .base import RiemannComputer
+
+
+@jit_class
+class Roe(RiemannComputer):
+ r"""
+    Roe Riemann Solver based on Toro et al. 2009
+
+ Args:
+ material (Material): The information container of the fluid material.
+
+ Supported Platforms:
+ ``GPU``
+
+ """
+
+ def __init__(self, material, net_dict=None):
+ super().__init__(material)
+
+ def compute_riemann_flux(self, con_var_left, con_var_right, axis):
+ """
+ Compute Riemann flux on face.
+
+ Inputs:
+ - **con_var_left** (Tensor) - Conservative variables on left side face.
+ - **con_var_right** (Tensor) - Conservative variables on right side face.
+ - **axis** (int) - 0, 1, 2 indicate x-dimension, y-dimension and z-dimension respectively.
+
+ Outputs:
+ Tensor, calculated riemann flux.
+ """
+ pri_var_left = cal_pri_var(con_var_left, self.material)
+ pri_var_right = cal_pri_var(con_var_right, self.material)
+
+ flux_left = cal_flux(con_var_left, pri_var_left, axis)
+ flux_right = cal_flux(con_var_right, pri_var_right, axis)
+
+ flux = 0.5 * (flux_left + flux_right)
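+        # Central average of the physical fluxes; the Roe dissipation term 0.5 * R |Lambda| L * (U_R - U_L)
+        # is assembled below from the eigen decomposition and subtracted for upwinding.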
+ right_eigen, eigen_vals, left_eigen = self.eigen_composition(pri_var_left, pri_var_right, axis)
+ einsum = ops.Einsum('ij...,jk...,kl...,l...->i...')
+ result = 0.5 * einsum((right_eigen, eigen_vals, left_eigen, (con_var_right - con_var_left)))
+ flux -= result
+
+ return flux
+
+ def roe_avg(self, pri_var_left, pri_var_right):
+ """
+ Compute the average Roe variables and related quantities.
+
+ Inputs:
+ - pri_var_left (Tensor): Primitive variables of the left state.
+ - pri_var_right (Tensor): Primitive variables of the right state.
+
+ Outputs:
+ list: A list containing the computed average Roe variables and related quantities in the following order:
+ - primes_ave (Tensor): The average Roe variables computed using the left and right primitive variables.
+ - c_ave (Tensor): The average speed of sound.
+ - grueneisen (Tensor): The Grueneisen coefficient computed using the average Roe variables.
+ - enthalpy_ave (Tensor): The average total enthalpy.
+ - velocity_square (Tensor): The square of the average velocity magnitude.
+ """
+ enthalpy_left = self.material.total_enthalpy(pri_var_left)
+ enthalpy_right = self.material.total_enthalpy(pri_var_right)
+ alpha = 1 / (mnp.sqrt(abs(pri_var_left[0])) + mnp.sqrt(abs(pri_var_right[0])))
+ primes_ave = (
+ mnp.sqrt(abs(pri_var_left[0])) * pri_var_left + mnp.sqrt(abs(pri_var_right[0])) * pri_var_right
+ ) * alpha
+ enthalpy_ave = (
+ mnp.sqrt(abs(pri_var_left[0])) * enthalpy_left + mnp.sqrt(abs(pri_var_right[0])) * enthalpy_right
+ ) * alpha
+ velocity_square = primes_ave[1] * primes_ave[1] + primes_ave[2] * primes_ave[2] + primes_ave[3] * primes_ave[3]
+ c_ave = mnp.sqrt(abs((self.material.gamma - 1) * (enthalpy_ave - 0.5 * velocity_square)))
+ grueneisen = self.material.grueneisen(primes_ave)
+ return [primes_ave, c_ave, grueneisen, enthalpy_ave, velocity_square]
+
+ def eigen_composition(self, pri_var_left, pri_var_right, axis):
+ """
+ Performs eigen composition for a given axis.
+
+ Inputs:
+ pri_var_left (Tensor): Array of primary variables on the left side of the interface.
+ pri_var_right (Tensor): Array of primary variables on the right side of the interface.
+ axis (int): Axis along which the eigen composition is performed.
+
+ Outputs:
+ tuple: A tuple containing the following elements:
+ - right_eigen (Tensor): Right eigenvector matrix.
+ - eigen_vals (Tensor): Eigenvalue matrix.
+ - left_eigen (Tensor): Left eigenvector matrix.
+ """
+ primes_ave, c_ave, grueneisen, enthalpy_ave, velocity_square = self.roe_avg(pri_var_left, pri_var_right)
+ ek = 0.5 * velocity_square
+ zeros = mnp.zeros_like(primes_ave[0])
+ ones = mnp.ones_like(primes_ave[0])
+
+ right_eigen, eigen_vals, left_eigen = None, None, None
+ gamma_1 = mnp.abs(primes_ave[axis + 1] - c_ave)
+ gamma_234 = mnp.abs(primes_ave[axis + 1])
+ gamma_5 = mnp.abs(primes_ave[axis + 1] + c_ave)
+
+ eigen_vals = mnp.stack(
+ (
+ mnp.stack((gamma_1, zeros, zeros, zeros, zeros)),
+ mnp.stack((zeros, gamma_234, zeros, zeros, zeros)),
+ mnp.stack((zeros, zeros, gamma_234, zeros, zeros)),
+ mnp.stack((zeros, zeros, zeros, gamma_234, zeros)),
+ mnp.stack((zeros, zeros, zeros, zeros, gamma_5)),
+ )
+ )
+
+ if axis == 0:
+ right_eigen = mnp.stack(
+ (
+ mnp.stack((ones, ones, zeros, zeros, ones)),
+ mnp.stack(
+ (
+ primes_ave[1] - c_ave,
+ primes_ave[1],
+ zeros,
+ zeros,
+ primes_ave[1] + c_ave,
+ )
+ ),
+ mnp.stack((primes_ave[2], primes_ave[2], -ones, zeros, primes_ave[2])),
+ mnp.stack((primes_ave[3], primes_ave[3], zeros, ones, primes_ave[3])),
+ mnp.stack(
+ (
+ enthalpy_ave - primes_ave[1] * c_ave,
+ ek,
+ -primes_ave[2],
+ primes_ave[3],
+ enthalpy_ave + primes_ave[1] * c_ave,
+ )
+ ),
+ )
+ )
+
+ left_eigen = (
+ grueneisen
+ / 2
+ / c_ave**2
+ * mnp.stack(
+ (
+ mnp.stack(
+ (
+ ek + c_ave / grueneisen * primes_ave[1],
+ -primes_ave[1] - c_ave / grueneisen,
+ -primes_ave[2],
+ -primes_ave[3],
+ ones,
+ )
+ ),
+ mnp.stack(
+ (
+ 2 / grueneisen * c_ave**2 - velocity_square,
+ 2 * primes_ave[1],
+ 2 * primes_ave[2],
+ 2 * primes_ave[3],
+ -2 * ones,
+ )
+ ),
+ mnp.stack(
+ (
+ 2 * c_ave**2 / grueneisen * primes_ave[2],
+ zeros,
+ -2 * c_ave**2 / grueneisen,
+ zeros,
+ zeros,
+ )
+ ),
+ mnp.stack(
+ (
+ -2 * c_ave**2 / grueneisen * primes_ave[3],
+ zeros,
+ zeros,
+ 2 * c_ave**2 / grueneisen,
+ zeros,
+ )
+ ),
+ mnp.stack(
+ (
+ ek - c_ave / grueneisen * primes_ave[1],
+ -primes_ave[1] + c_ave / grueneisen,
+ -primes_ave[2],
+ -primes_ave[3],
+ ones,
+ )
+ ),
+ )
+ )
+ )
+
+            # Y - DIRECTION
+ elif axis == 1:
+ right_eigen = mnp.stack(
+ (
+ mnp.stack((ones, zeros, ones, zeros, ones)),
+ mnp.stack((primes_ave[1], ones, primes_ave[1], zeros, primes_ave[1])),
+ mnp.stack(
+ (
+ primes_ave[2] - c_ave,
+ zeros,
+ primes_ave[2],
+ zeros,
+ primes_ave[2] + c_ave,
+ )
+ ),
+ mnp.stack((primes_ave[3], zeros, primes_ave[3], -ones, primes_ave[3])),
+ mnp.stack(
+ (
+ enthalpy_ave - primes_ave[2] * c_ave,
+ primes_ave[1],
+ ek,
+ -primes_ave[3],
+ enthalpy_ave + primes_ave[2] * c_ave,
+ )
+ ),
+ )
+ )
+
+ left_eigen = (
+ grueneisen
+ / 2
+ / c_ave**2
+ * mnp.stack(
+ (
+ mnp.stack(
+ (
+ ek + c_ave / grueneisen * primes_ave[2],
+ -primes_ave[1],
+ -primes_ave[2] - c_ave / grueneisen,
+ -primes_ave[3],
+ ones,
+ )
+ ),
+ mnp.stack(
+ (
+ -2 * c_ave**2 / grueneisen * primes_ave[1],
+ 2 * c_ave**2 / grueneisen,
+ zeros,
+ zeros,
+ zeros,
+ )
+ ),
+ mnp.stack(
+ (
+ 2 / grueneisen * c_ave**2 - velocity_square,
+ 2 * primes_ave[1],
+ 2 * primes_ave[2],
+ 2 * primes_ave[3],
+ -2 * ones,
+ )
+ ),
+ mnp.stack(
+ (
+ 2 * c_ave**2 / grueneisen * primes_ave[3],
+ zeros,
+ zeros,
+ -2 * c_ave**2 / grueneisen,
+ zeros,
+ )
+ ),
+ mnp.stack(
+ (
+ ek - c_ave / grueneisen * primes_ave[2],
+ -primes_ave[1],
+ -primes_ave[2] + c_ave / grueneisen,
+ -primes_ave[3],
+ ones,
+ )
+ ),
+ )
+ )
+ )
+
+            # Z - DIRECTION
+ elif axis == 2:
+ right_eigen = mnp.stack(
+ (
+ mnp.stack((ones, zeros, zeros, ones, ones)),
+ mnp.stack((primes_ave[1], -ones, zeros, primes_ave[1], primes_ave[1])),
+ mnp.stack((primes_ave[2], zeros, ones, primes_ave[2], primes_ave[2])),
+ mnp.stack(
+ (
+ primes_ave[3] - c_ave,
+ zeros,
+ zeros,
+ primes_ave[3],
+ primes_ave[3] + c_ave,
+ )
+ ),
+ mnp.stack(
+ (
+ enthalpy_ave - primes_ave[3] * c_ave,
+ -primes_ave[1],
+ primes_ave[2],
+ ek,
+ enthalpy_ave + primes_ave[3] * c_ave,
+ )
+ ),
+ )
+ )
+
+ left_eigen = (
+ grueneisen
+ / 2
+ / c_ave**2
+ * mnp.stack(
+ (
+ mnp.stack(
+ (
+ ek + c_ave / grueneisen * primes_ave[3],
+ -primes_ave[1],
+ -primes_ave[2],
+ -primes_ave[3] - c_ave / grueneisen,
+ ones,
+ )
+ ),
+ mnp.stack(
+ (
+ 2 * c_ave**2 / grueneisen * primes_ave[1],
+ -2 * c_ave**2 / grueneisen,
+ zeros,
+ zeros,
+ zeros,
+ )
+ ),
+ mnp.stack(
+ (
+ -2 * c_ave**2 / grueneisen * primes_ave[2],
+ zeros,
+ 2 * c_ave**2 / grueneisen,
+ zeros,
+ zeros,
+ )
+ ),
+ mnp.stack(
+ (
+ 2 / grueneisen * c_ave**2 - velocity_square,
+ 2 * primes_ave[1],
+ 2 * primes_ave[2],
+ 2 * primes_ave[3],
+ -2 * ones,
+ ),
+ ),
+ mnp.stack(
+ (
+ ek - c_ave / grueneisen * primes_ave[3],
+ -primes_ave[1],
+ -primes_ave[2],
+ -primes_ave[3] + c_ave / grueneisen,
+ ones,
+ )
+ ),
+ )
+ )
+ )
+ return right_eigen, eigen_vals, left_eigen
diff --git a/MindFlow/mindflow/geometry/geom_utils.py b/MindFlow/mindflow/geometry/geom_utils.py
index c2f5876af44c8fbf5b358acf2b9c908913fb4cb3..3f77013f6253f238e5e75c4fd9a9605f7ac94d20 100644
--- a/MindFlow/mindflow/geometry/geom_utils.py
+++ b/MindFlow/mindflow/geometry/geom_utils.py
@@ -15,7 +15,7 @@
"""utils for geometry"""
from __future__ import absolute_import
import numpy as np
-import scipy.stats as ss
+from scipy.stats import qmc
from .geometry_base import PartSamplingConfig, SamplingConfig, GEOM_TYPES, SAMPLER_TYPES
from ..utils.check_func import check_param_type
@@ -69,9 +69,9 @@ def generate_sampling_config(dict_config):
_sampler_method = {
- "lhs": ss.qmc.LatinHypercube,
- "halton": ss.qmc.Halton,
- "sobol": ss.qmc.Sobol,
+ "lhs": qmc.LatinHypercube,
+ "halton": qmc.Halton,
+ "sobol": qmc.Sobol,
"uniform": np.random.rand
}
diff --git a/MindSPONGE/README.md b/MindSPONGE/README.md
index 7990dc611e994ca9aa3816a47a307b0ec2d56037..605e8a09a930d4fa9d64f205b2c00d8585ab4207 100644
--- a/MindSPONGE/README.md
+++ b/MindSPONGE/README.md
@@ -18,17 +18,19 @@ MindSpore SPONGE(Simulation Package tOwards Next GEneration molecular modelling)
## **Latest News** 📰
- 🔥`Top` [**open source internship task**](https://gitee.com/mindspore/community/issues/I561LI?from=project-issue) has been released! Everyone is welcome to claim it~
-- 🔥`2023.1.31` MindSPONGE version 1.0.0-alpha is released. The documents are available on [**Scientific Computing MindSPONGE module**](https://mindspore.cn/mindsponge/docs/en/r1.0.0-alpha/index.html) on MindSpore website
-- `2022.8.23` Paper "Few-Shot Learning of Accurate Folding Landscape for Protein Structure Prediction" is preprinted in arxiv, Please refer to [Paper](https://arxiv.org/abs/2208.09652)
+- 🔥`2023.6.26` The MindSPONGE paper "Artificial Intelligence Enhanced Molecular Simulations" is published in JCTC and selected as one of its Most Read Articles. Please refer to the [paper](https://pubs.acs.org/doi/10.1021/acs.jctc.3c00214).
+- 🔥`2023.5.31` Paper "Assisting and Accelerating NMR Assignment with Restrained Structure Prediction" is preprinted on bioRxiv. Please refer to the [paper](https://www.biorxiv.org/content/10.1101/2023.04.14.536890v1) and [code](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/research/FAAST/).
+- `2023.1.31` MindSPONGE version 1.0.0-alpha is released. The documents are available on [**Scientific Computing MindSPONGE module**](https://mindspore.cn/mindsponge/docs/en/r1.0.0-alpha/index.html) on MindSpore website
+- `2022.8.23` Paper "Few-Shot Learning of Accurate Folding Landscape for Protein Structure Prediction" is preprinted on arXiv. Please refer to the [paper](https://arxiv.org/abs/2208.09652)
- `2022.8.11—2022.8.15` MindSpore SPONGE SIG [**Summer School**](#special-interesting-group-), [**replay**](https://www.bilibili.com/video/BV1pB4y167yS?spm_id_from=333.999.0.0&vd_source=94e532d8ff646603295d235e65ef1453)
- `2022.07.18` Paper "SPONGE: A GPU-Accelerated Molecular Dynamics Package with Enhanced Sampling and AI-Driven Algorithms"is published in Chinese Journal of Chemistry. Please refer to [paper](https://onlinelibrary.wiley.com/doi/epdf/10.1002/cjoc.202100456) and [codes](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/mindsponge/ccsrc/molecular_dynamics)
- `2022.07.09` MEGA-Assessment wins CAMEO-QE monthly 1st
-- `2022.06.27` Paper "PSP: Million-level Protein Sequence Dataset for Protein Structure Prediction" is preprinted in arxiv. Please refer to [Paper](https://arxiv.org/pdf/2206.12240v1.pdf) and [codes](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/MEGAProtein).
+- `2022.06.27` Paper "PSP: Million-level Protein Sequence Dataset for Protein Structure Prediction" is preprinted on arXiv. Please refer to the [paper](https://arxiv.org/pdf/2206.12240v1.pdf) and [codes](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/MEGAProtein).
- `2022.04.21` MEGA-Fold wins CAMEO-3D monthly 1st. [Related News](https://www.huawei.com/cn/news/2022/4/mindspore-cameo-protein-ascend)
## **Coming Soon** 🚀
-- Everything is coming soon, don't worry~
+- The third Summer School will be held at Peking University from August 21 to August 25, 2023, and is now under preparation. Stay tuned!
## **Quick Start**
@@ -131,6 +133,7 @@ md.run(1000, callbacks=[run_info, cb_h5md])
**More Cases**:👀
+- [NMR Data Automatic Analysis FAAST](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/research/FAAST)
- [Protein Relaxation](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/molecular_dynamics/protein_relaxation)
- [Protein Structure Prediction MEGA-Fold](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/MEGAProtein/)
- [Protein Structure Assessment MEGA-Assessment](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/MEGAProtein/)
@@ -148,7 +151,8 @@ Due to the dependency between MindSPONGE and MindSpore, please follow the table
| MindSPONGE Version | Branch | MindSpore Version | Python Version |
|:------------------:|:------------------------------------------------------------------------:|:-----------------:|:--------------:|
-| 1.0.0 | [master](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE) | \>=2.0.0-alpha | \>=3.7 |
+| master | [master](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE) | \>=2.0.0 | \>=3.7 |
+| 1.0.0 | [r0.2.0](https://gitee.com/mindspore/mindscience/tree/r0.2.0/MindSPONGE) | \>=2.0.0 | \>=3.7 |
```bash
pip install -r requirements.txt
diff --git a/MindSPONGE/README_CN.md b/MindSPONGE/README_CN.md
index 168db662dcdb6865e09b541d10715256bcc90cd5..78ddb255c4ef54187008d9e2157ed16c7dbe015f 100644
--- a/MindSPONGE/README_CN.md
+++ b/MindSPONGE/README_CN.md
@@ -17,7 +17,9 @@ MindSpore SPONGE(Simulation Package tOwards Next GEneration molecular modelling)
## **最新消息** 📰
- 🔥`置顶` [**开源实习任务**](https://gitee.com/mindspore/community/issues/I561LI?from=project-issue)发布!欢迎大家认领~
-- 🔥`2023.1.31` MindSPONGE 1.0.0-alpha版本发布,文档介绍可参见MindSpore官网中的[**科学计算套件MindSPONGE模块**](https://mindspore.cn/mindsponge/docs/zh-CN/r1.0.0-alpha/index.html)
+- 🔥`2023.6.26` MindSPONGE论文"Artificial Intelligence Enhanced Molecular Simulations"发表于计算化学期刊JCTC,同时当选Most Read Articles,详情参见[论文](https://pubs.acs.org/doi/10.1021/acs.jctc.3c00214)
+- 🔥`2023.5.31` NMR核磁共振动态蛋白质结构解析方法正式开源,详情参见论文 [Assisting and Accelerating NMR Assignment with Restrained Structure Prediction](https://www.biorxiv.org/content/10.1101/2023.04.14.536890v1) 和[代码](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/research/FAAST/)
+- `2023.1.31` MindSPONGE 1.0.0-alpha版本发布,文档介绍可参见MindSpore官网中的[**科学计算套件MindSPONGE模块**](https://mindspore.cn/mindsponge/docs/zh-CN/r1.0.0-alpha/index.html)
- `2022.8.23` 论文"Few-Shot Learning of Accurate Folding Landscape for Protein Structure Prediction" arxiv预印,详情参见[论文](https://arxiv.org/abs/2208.09652)
- `2022.8.11—2022.8.15` MindSpore SPONGE SIG[**暑期学校活动**](#sig-),[**活动回放**](https://www.bilibili.com/video/BV1pB4y167yS?spm_id_from=333.999.0.0&vd_source=94e532d8ff646603295d235e65ef1453)
- `2022.07.18` 论文"SPONGE: A GPU-Accelerated Molecular Dynamics Package with Enhanced Sampling and AI-Driven Algorithms"发表于期刊Chinese Journal of Chemistry,详情参见[论文](https://onlinelibrary.wiley.com/doi/epdf/10.1002/cjoc.202100456)和[代码](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/mindsponge/ccsrc/molecular_dynamics)
@@ -27,7 +29,7 @@ MindSpore SPONGE(Simulation Package tOwards Next GEneration molecular modelling)
## **即将到来** 🚀
-- 不要着急,精彩即将到来~
+- 第三期暑期学校活动将于2023年8月21日——8月25日在北京大学举行,活动正在火热筹备中,敬请期待!
## **初体验**
@@ -108,6 +110,7 @@ md.run(1000, callbacks=[run_info, cb_h5md])
**更多应用案例请见**:👀
+- [NMR数据自动解析 FAAST](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/research/FAAST)
- [蛋白质结构弛豫](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/molecular_dynamics/protein_relaxation)
- [蛋白质结构预测 MEGA-Fold](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/MEGAProtein/)
- [蛋白质结构评估 MEGA-Assessment](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/MEGAProtein/)
@@ -125,7 +128,8 @@ md.run(1000, callbacks=[run_info, cb_h5md])
| MindSPONGE | 分支 | MindSpore | Python |
| :--------: | :-------------------------------------------------------------------: | :-------: | :----: |
-| 1.0.0 | [master](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE) | \>=2.0.0 | \>=3.7 |
+| master | [master](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE) | \>=2.0.0 | \>=3.7 |
+| 1.0.0 | [r0.2.0](https://gitee.com/mindspore/mindscience/tree/r0.2.0/MindSPONGE) | \>=2.0.0 | \>=3.7 |
```bash
pip install -r requirements.txt
diff --git a/MindSPONGE/RELEASE.md b/MindSPONGE/RELEASE.md
index dda9a90aaefd508860821ec7dd2d97090293ace5..c1ffba8310544188ad80269d8e379ba2d41d04ad 100644
--- a/MindSPONGE/RELEASE.md
+++ b/MindSPONGE/RELEASE.md
@@ -1,10 +1,10 @@
-# MindSPONGE Release Notes
+# MindSpore SPONGE Release Notes
[查看中文](./RELEASE_CN.md)
-## MindSPONGE 1.0.0-rc1 Release Notes
+## MindSpore SPONGE 1.0.0-rc1 Release Notes
-MindSPONGE(Simulation Package tOwards Next GEneration molecular modelling) is a toolkit for Computational Biology based on AI framework MindSpore,which supports MD, folding and so on. It aims to provide efficient AI computational biology software for a wide range of scientific researchers, staff, teachers and students.
+MindSpore SPONGE(Simulation Package tOwards Next GEneration molecular modelling) is a toolkit for Computational Biology based on the AI framework MindSpore, which supports MD, folding and so on. It aims to provide efficient AI computational biology software for a wide range of scientific researchers, staff, teachers and students.
### Major Features and Improvements
@@ -37,4 +37,4 @@ Thanks goes to these wonderful people:
yufan, gaoyiqin, wangzidong, lujiale, chuht, wangmin0104, mamba_ni, yujialiang, melody, Yesterday, xiayijie, jun.zhang, siruil, Dechin Chen, 十六夜, wangchenghao, liushuo, lijunbin.
-Contributions of any kind are welcome!
\ No newline at end of file
+Contributions of any kind are welcome!
diff --git a/MindSPONGE/RELEASE_CN.md b/MindSPONGE/RELEASE_CN.md
index cdd7be96f073c3f4f2b4d1307421fb06bf40ad23..82f7f429381f65b16e1578afafa5c7c931fabe9a 100644
--- a/MindSPONGE/RELEASE_CN.md
+++ b/MindSPONGE/RELEASE_CN.md
@@ -1,10 +1,10 @@
-# MindSPONGE Release Notes
+# MindSpore SPONGE Release Notes
[View English](./RELEASE.md)
-## MindSPONGE 1.0.0-rc1 Release Notes
+## MindSpore SPONGE 1.0.0-rc1 Release Notes
-MindSPONGE(Simulation Package tOwards Next GEneration molecular modelling)是基于昇思MindSpore的计算生物领域套件,支持分子动力学、蛋白质折叠等常用功能,旨在于为广大的科研人员、老师及学生提供高效易用的AI计算生物软件。
+MindSpore SPONGE(Simulation Package tOwards Next GEneration molecular modelling)是基于昇思MindSpore的计算生物领域套件,支持分子动力学、蛋白质折叠等常用功能,旨在于为广大的科研人员、老师及学生提供高效易用的AI计算生物软件。
### 主要特性和增强
@@ -37,4 +37,4 @@ MindSPONGE(Simulation Package tOwards Next GEneration molecular modelling)是基
yufan, gaoyiqin, wangzidong, lujiale, chuht, wangmin0104, mamba_ni, yujialiang, melody, Yesterday, xiayijie, jun.zhang, siruil, Dechin Chen, 十六夜, wangchenghao, liushuo, lijunbin.
-欢迎以任何形式对项目提供贡献!
\ No newline at end of file
+欢迎以任何形式对项目提供贡献!
diff --git a/MindSPONGE/applications/MEGAProtein/README_CN.md b/MindSPONGE/applications/MEGAProtein/README_CN.md
index 43b57c6be2c7889cbf801b9f8d54b03780b7ee2d..585199fe3d5fadb1e4624e25461f6f7a46f85690 100644
--- a/MindSPONGE/applications/MEGAProtein/README_CN.md
+++ b/MindSPONGE/applications/MEGAProtein/README_CN.md
@@ -69,18 +69,19 @@ MEGA-Protein主要由三部分组成:
- [MEGA-Protein](#mega-protein)
- - [环境配置](#环境配置)
- - [硬件环境与框架](#硬件环境与框架)
- - [配置数据库检索](#配置数据库检索)
- - [代码目录](#代码目录)
- - [运行示例](#运行示例)
- - [MEGA-Fold蛋白质结构预测](#mega-fold蛋白质结构预测)
- - [MEGA-EvoGen MSA生成/增强](#mega-evogen-msa生成增强)
- - [MEGA-Assessment 蛋白质结构评分&优化](#mega-assessment-蛋白质结构评分优化)
- - [MEGA-Protein整体使用](#mega-protein整体使用)
- - [可用的模型和数据集](#可用的模型和数据集)
- - [引用](#引用)
- - [致谢](#致谢)
+ - [可用的模型和数据集](#%E5%8F%AF%E7%94%A8%E7%9A%84%E6%A8%A1%E5%9E%8B%E5%92%8C%E6%95%B0%E6%8D%AE%E9%9B%86)
+ - [环境配置](#%E7%8E%AF%E5%A2%83%E9%85%8D%E7%BD%AE)
+ - [硬件环境与框架](#%E7%A1%AC%E4%BB%B6%E7%8E%AF%E5%A2%83%E4%B8%8E%E6%A1%86%E6%9E%B6)
+ - [配置数据库检索](#%E9%85%8D%E7%BD%AE%E6%95%B0%E6%8D%AE%E5%BA%93%E6%A3%80%E7%B4%A2)
+ - [代码示例](#%E4%BB%A3%E7%A0%81%E7%A4%BA%E4%BE%8B)
+ - [MEGA-Fold蛋白质结构预测推理](#mega-fold%E8%9B%8B%E7%99%BD%E8%B4%A8%E7%BB%93%E6%9E%84%E9%A2%84%E6%B5%8B%E6%8E%A8%E7%90%86)
+ - [MEGA-Fold蛋白质结构预测训练](#mega-fold%E8%9B%8B%E7%99%BD%E8%B4%A8%E7%BB%93%E6%9E%84%E9%A2%84%E6%B5%8B%E8%AE%AD%E7%BB%83)
+ - [MEGA-EvoGen MSA生成/增强推理](#mega-evogen-msa%E7%94%9F%E6%88%90%E5%A2%9E%E5%BC%BA%E6%8E%A8%E7%90%86)
+ - [MEGA-Assessment 蛋白质结构评分推理](#mega-assessment-%E8%9B%8B%E7%99%BD%E8%B4%A8%E7%BB%93%E6%9E%84%E8%AF%84%E5%88%86%E6%8E%A8%E7%90%86)
+ - [MEGA-Assessment 蛋白质结构评分训练](#mega-assessment-%E8%9B%8B%E7%99%BD%E8%B4%A8%E7%BB%93%E6%9E%84%E8%AF%84%E5%88%86%E8%AE%AD%E7%BB%83)
+ - [MEGA-Protein整体使用](#mega-protein%E6%95%B4%E4%BD%93%E4%BD%BF%E7%94%A8)
+ - [引用](#%E5%BC%95%E7%94%A8)
+ - [致谢](#%E8%87%B4%E8%B0%A2)
@@ -154,14 +155,14 @@ MEGA-Protein主要由三部分组成:
# configuration for template search
hhsearch_binary_path HHsearch可执行文件路径
kalign_binary_path kalign可执行文件路径
- pdb70_database_path pdb70文件夹路径
- mmcif_dir mmcif文件夹路径
+ pdb70_database_path {pdb70文件夹}/pdb70
+ mmcif_dir mmcif文件夹
obsolete_pdbs_path PDB IDs的映射文件路径
max_template_date 模板搜索截止时间,该时间点之后的模板会被过滤掉,默认值"2100-01-01"
# configuration for Multiple Sequence Alignment
mmseqs_binary MMseqs2可执行文件路径
- uniref30_path uniref30文件夹路径
- database_envdb_dir colabfold_envdb_202108文件夹路径
+ uniref30_path {uniref30文件夹}/uniref30_2103_db
+ database_envdb_dir {colabfold_envdb文件夹}/colabfold_envdb_202108_db
a3m_result_path mmseqs2检索结果(msa)的保存路径,默认值"./a3m_result/"
```
@@ -233,6 +234,8 @@ MEGA-Protein主要由三部分组成:
{"pre_process_time": 0.61, "model_time": 87.5, "pos_process_time": 0.02, "all_time ": 88.12, "confidence ": 93.5}
```
+*注:1、样例推理不包含数据库检索,检索耗时数分钟至数十分钟不等。2、多条序列推理时首条序列需编译网络,耗时可能更长,第二条起恢复正常。*
+
MEGA-Fold预测结果与真实结果对比:
- 7VGB_A,长度711,lDDT 92.3:
diff --git a/MindSPONGE/applications/MEGAProtein/data/preprocess.py b/MindSPONGE/applications/MEGAProtein/data/preprocess.py
index 2b59b8343bc66e024c186279547aac936866119a..a7d99a4a315841f60adba416ff9934ed1253d8f8 100644
--- a/MindSPONGE/applications/MEGAProtein/data/preprocess.py
+++ b/MindSPONGE/applications/MEGAProtein/data/preprocess.py
@@ -473,7 +473,9 @@ class Feature:
"all_atom_positions", "rigidgroups_gt_frames", "rigidgroups_gt_exists",
"rigidgroups_alt_gt_frames", "torsion_angles_sin_cos", "chi_mask"]
label_arrays = [features[key] for key in label_keys]
- label_arrays = [array[0] for array in label_arrays]
+ for i, _ in enumerate(label_arrays):
+ if i not in (3, 4):
+ label_arrays[i] = label_arrays[i][0]
label_arrays = [array.astype(dtype) if array.dtype == "float64" else array for array in label_arrays]
label_arrays = [array.astype(dtype) if array.dtype == "float32" else array for array in label_arrays]
res = [arrays, prev_pos, prev_msa_first_row, prev_pair, label_arrays]
diff --git a/MindSPONGE/applications/MEGAProtein/main.py b/MindSPONGE/applications/MEGAProtein/main.py
index 39d588db947410983f53432469977b96aaea6dac..308a2dbac9cf113eabf207b8aecf1f7f8cafc4dc 100644
--- a/MindSPONGE/applications/MEGAProtein/main.py
+++ b/MindSPONGE/applications/MEGAProtein/main.py
@@ -192,7 +192,7 @@ def fold_train(args):
prev_pos, prev_msa_first_row, prev_pair = Tensor(d["prev_pos"]), Tensor(d["prev_msa_first_row"]), \
Tensor(d["prev_pair"])
ground_truth = d["pseudo_beta_gt"], d["pseudo_beta_mask_gt"], d["all_atom_mask_gt"], \
- d["true_msa"], d["bert_mask"], d["residx_atom14_to_atom37"], \
+ d["true_msa"][max_recycle], d["bert_mask"][max_recycle], d["residx_atom14_to_atom37"], \
d["restype_atom14_bond_lower_bound"], d["restype_atom14_bond_upper_bound"], \
d["atomtype_radius"], d["backbone_affine_tensor"], d["backbone_affine_mask"], \
d["atom14_gt_positions"], d["atom14_alt_gt_positions"], d["atom14_atom_is_ambiguous"], \
@@ -268,7 +268,7 @@ def assessment_infer(args):
prev_pair)
for pdb_name in os.listdir(args.decoy_pdb_path):
decoy_atom_positions, decoy_atom_mask, align_mask = \
- process_pdb(feat[4][0], ori_res_length, os.path.join(args.decoy_pdb_path, pdb_name))
+ process_pdb(feat[4][0], ori_res_length, os.path.join(args.decoy_pdb_path, pdb_name))
plddt = megaassessment(*feat_i, prev_pos, prev_msa_first_row, prev_pair,
Tensor(decoy_atom_positions), Tensor(decoy_atom_mask), run_pretrain=False)
t3 = time.time()
diff --git a/MindSPONGE/applications/MEGAProtein/module/loss_module.py b/MindSPONGE/applications/MEGAProtein/module/loss_module.py
index ace828a662bc4b5e8572aca40d6bd9357520574f..18ab09eae9d6132267284410e9db909fcb5f0fa9 100644
--- a/MindSPONGE/applications/MEGAProtein/module/loss_module.py
+++ b/MindSPONGE/applications/MEGAProtein/module/loss_module.py
@@ -90,7 +90,7 @@ class LossNet(nn.Cell):
def softmax_cross_entropy(self, logits, labels):
"""Computes softmax cross entropy given logits and one-hot class labels."""
- loss = -mnp.sum(labels * P.Log()(nn.Softmax()(logits)), axis=-1)
+ loss = -mnp.sum(labels * nn.LogSoftmax()(logits), axis=-1)
return mnp.asarray(loss)
def distogram_loss(self, logits, bin_edges, pseudo_beta, pseudo_beta_mask):
diff --git a/MindSPONGE/applications/README.md b/MindSPONGE/applications/README.md
index 0b9a73b70557241de4f8d15dfd4f79ed32fbea68..ae1383f81c399e5a1f5836d209eed5562b3b039a 100644
--- a/MindSPONGE/applications/README.md
+++ b/MindSPONGE/applications/README.md
@@ -35,6 +35,7 @@ MindSPONGE还集成了20个自研以及业界主流模型,主要涵盖分子
- [MEGA-Fold](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/MEGAProtein/model/fold.py)
- [MEGA-EvoGen](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/MEGAProtein/model/evogen.py)
- [MEGA-Assessment](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/MEGAProtein/model/assessment.py)
+ - [FAAST&RASP](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/research/FAAST)
- [Multimer-AlphaFold](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/research/Multimer)
- [UFold](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE/applications/research/UFold)
- 性质预测
diff --git a/MindSPONGE/applications/model cards/ColabDesign.md b/MindSPONGE/applications/model cards/ColabDesign.md
new file mode 100644
index 0000000000000000000000000000000000000000..e7cd82beb39cad323a9408706b3fb89ceb083f4c
--- /dev/null
+++ b/MindSPONGE/applications/model cards/ColabDesign.md
@@ -0,0 +1,56 @@
+# ColabDesign
+
+## 模型介绍
+
+对于一个骨架结构位置坐标已知但氨基酸种类未知的蛋白质,假定它的长度为n,该序列共有20的n次方种可能性,然而自然界中现存的蛋白质样本只占这庞大集合中的一小部分,难以通过遍历的方式筛选到合理的氨基酸序列。因此,蛋白质设计任务即为通过计算的方式,找到可以形成该pdb结构的蛋白质氨基酸序列。
+
+ColabDesign是蛋白质设计模型,通过输入蛋白质骨架坐标的pdb文件,基于蛋白质结构预测模型来预测整个蛋白质序列,被称为Hallucination and Inpainting。
+
+通常设计具有某种特定功能的蛋白质共需要2个步骤:
+
+- 识别特定功能的可能活性位点的几何形状与氨基酸种类,如酶的活性位点,蛋白抑制剂等。
+- 设计一个包含这些特定活性位点的氨基酸序列,并折叠成对应三维结构。
+
+步骤2为ColabDesign主要解决的问题,用固定位点或者骨架作为输入,产生完整序列。
+
+最早对该项目进行探索的方法为[trDesign](https://www.biorxiv.org/content/10.1101/2020.07.23.218917v1.abstract),使用了trDesign和Rosetta结合的方式。之后[Hallucination](https://www.nature.com/articles/s41586-021-04184-w)基于trDesign,借鉴了DeepDream模型,以Hallucination+trDesign的设计方式进一步提升了效果。在融合功能[Motif](https://www.biorxiv.org/content/10.1101/2020.11.29.402743v1.abstract)设计方法出现之后,将trDesign和Hallucination相结合,解决了对预先生成的scaffold数据库的依赖问题。
+
+在这之后,ColabDesign以RoseTTAFold为核心进行实验,并在AlphaFold2上进行交叉验证,基于这两个模型的Hallucination分别被称为“RFdesign”和“AFdesign”。RoseTTAFold显式地利用了三维结构坐标,相比只利用二维特征信息的trRosetta,它有更多的信息来定义各类loss,解决了更多以往无法解决的问题,大幅提升了实验精度。
+
+
+
+A图为Free hallucination,将序列传入trRosetta或者RoseTTAFold预测3D结构,使用MCMC迭代优化loss函数来产生序列。B图为Constrained hallucination,使用与A图相同的方式,但loss函数除了结构信息之外还包含了Motif重述和其他特定任务信息。C图为缺失信息恢复任务,通过输入部分序列或者部分结构信息来补齐完整序列或结构。D图为可以通过约束幻觉及相应损失函数(即论文所述方法)解决的各类设计问题。E图为论文方法概览,其中的蛋白质设计挑战为多种场景下的缺失信息恢复任务。
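+
+下面给出Free hallucination流程的一个极简示意(并非ColabDesign的实际实现:其中predict_structure与structure_loss均为假设的接口,接受准则也简化为贪心接受,真实方法使用MCMC/Metropolis准则与更复杂的loss):
+
+```python
+# 示意代码:随机初始化序列,反复做单点突变并依据结构loss决定是否接受
+import random
+
+AMINO_ACIDS = "ACDEFGHIKLMNPQRSTVWY"
+
+def hallucinate(length, n_steps, predict_structure, structure_loss):
+    seq = [random.choice(AMINO_ACIDS) for _ in range(length)]
+    best_loss = structure_loss(predict_structure("".join(seq)))
+    for _ in range(n_steps):
+        pos = random.randrange(length)
+        old = seq[pos]
+        seq[pos] = random.choice(AMINO_ACIDS)      # 随机单点突变
+        loss = structure_loss(predict_structure("".join(seq)))
+        if loss < best_loss:                       # 简化的贪心接受准则
+            best_loss = loss
+        else:
+            seq[pos] = old                         # 拒绝突变并回退
+    return "".join(seq), best_loss
+```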
+
+## 使用限制
+
+该Pipeline中的ColabDesign与最初的ColabDesign不同,没有基于RoseTTAFold和AlphaFold 2,而是基于MEGA-Protein实现了Hallucination和fixbb两个功能。
+
+该模型目前只支持推理,即输入蛋白质pdb文件,输出设计后的氨基酸序列。暂未提供模型训练方法与训练数据集。
+
+## 如何使用
+
+可使用PDB文件作为模型推理输入,样例代码如下所示:
+
+```bash
+from mindsponge import PipeLine
+
+pipe = PipeLine(name = "ColabDesign")
+pipe.set_device_id(0)
+pipe.initialize("fold_design")
+pipe.model.from_pretrained()
+res = pipe.predict({YOUR_PDB_PATH})
+print(res)
+```
+
+## 引用
+
+```bash
+@article{wang2021deep,
+ title={Deep learning methods for designing proteins scaffolding functional sites},
+ author={Wang, Jue and Lisanza, Sidney and Juergens, David and Tischer, Doug and Anishchenko, Ivan and Baek, Minkyung and Watson, Joseph L and Chun, Jung Ho and Milles, Lukas F and Dauparas, Justas and others},
+ journal={BioRxiv},
+ pages={2021--11},
+ year={2021},
+ publisher={Cold Spring Harbor Laboratory}
+}
+```
diff --git a/MindSPONGE/applications/model cards/DeepFri.md b/MindSPONGE/applications/model cards/DeepFri.md
new file mode 100644
index 0000000000000000000000000000000000000000..24a97d66cb32dfabf88fd074ae2a1a484007aa92
--- /dev/null
+++ b/MindSPONGE/applications/model cards/DeepFri.md
@@ -0,0 +1,61 @@
+# DeepFRI
+
+## 模型介绍
+
+DeepFRI是一种图形卷积网络,通过利用从蛋白质语言模型和蛋白质结构中提取的序列特征来预测蛋白质功能。 它可以对蛋白质进行四个方面的预测: 分子功能(Molecular Function, MF)、细胞组分(Cellular Component, CC)、生物过程(Biological Process, BP)、EC编号(Enzyme Commission, EC)。
+
+- MF、CC、BP是Gene Ontology(基因本体论)的三大独立的本体论词汇表。GO是一个国际标准化的基因功能分类体系, 提供了一套动态并可控的词汇表来全面描述生物体中基因和基因产物的属性,它由一组预先定义好的GO术语(GO term)组成,这组术语对基因产物的功能进行限定和描述。 GO terms是对基因的产物而不是基因本身进行描述,因为基因的产物有时候不止一种,而GO name则是该GO term的具体名称。 DeepFRI将输出MF、CC、BP对应的GO term与GO name。
+
+- EC编号或EC号是酶学委员会(Enzyme Commission)为酶所制作的一套编号分类法,是以每种酶所催化的化学反应为分类基础。 这套分类法亦同时会为各种酶给予一个建议的名称,所以亦称为酶学委员会命名法。 针对EC,DeepFRI将直接输出其EC编号。
+
+
+
+## 数据集
+
+训练所用数据集分别为从PDB数据库和SWISS-MODEL数据库中挑选的条目构建的集合。选取带有注释的PDB链与SWISS-MODEL链,删除相同和相似的序列,通过在95%序列同一性(即相同残基数占序列比对残基总数的比例)下对所有PDB链和SWISS-MODEL链(能够检索到contact map的链)进行聚类来创建非冗余集。
+
+- PDB蛋白质结构数据库(Protein Data Bank,简称PDB)是美国Brookhaven国家实验室于1971年创建的,由结构生物信息学研究合作组织(Research Collaboratory for Structural Bioinformatics,简称RCSB)维护。
+
+- SWISS-MODEL知识库是一个蛋白质3D结构数据库,库中收录的蛋白质结构都是使用SWISS-MODEL同源建模方法(homology-modelling)得来的。
+
+## 使用限制
+
+该模型基于SWISS-MODEL数据库中数据进行训练,但是由于数据集链接失效无法获取数据集,因此该模型在PipeLine中不提供训练功能。
+
+## 如何使用
+
+可使用PDB文件作为模型推理输入,通过改变初始化时所使用的key,即可选择不同的任务,样例代码如下所示:
+
+```bash
+import mindspore as ms
+from mindsponge import PipeLine
+
+ms.set_context(mode=ms.GRAPH_MODE)
+
+pipe = PipeLine(name="DeepFri")
+pipe.set_device_id(0)
+
+# 可选任务共三种,MF,CC和BP
+pipe.initialize(key="deepfri_mf")
+# pipe.initialize(key="deepfri_cc")
+# pipe.initialize(key="deepfri_bp")
+
+pipe.model.from_pretrained()
+result = pipe.predict({YOUR_PDB_PATH})
+print(result)
+```
+
+## 引用
+
+```bash
+@article{gligorijevic2021structure,
+ title={Structure-based protein function prediction using graph convolutional networks},
+ author={Gligorijevi{\'c}, Vladimir and Renfrew, P Douglas and Kosciolek, Tomasz and Leman, Julia Koehler and Berenberg, Daniel and Vatanen, Tommi and Chandler, Chris and Taylor, Bryn C and Fisk, Ian M and Vlamakis, Hera and others},
+ journal={Nature communications},
+ volume={12},
+ number={1},
+ pages={3168},
+ year={2021},
+ publisher={Nature Publishing Group UK London}
+}
+```
diff --git a/MindSPONGE/applications/model cards/ESM-2.md b/MindSPONGE/applications/model cards/ESM-2.md
new file mode 100644
index 0000000000000000000000000000000000000000..0f07fbb0d78c88f5de8f356600b986b3571bc101
--- /dev/null
+++ b/MindSPONGE/applications/model cards/ESM-2.md
@@ -0,0 +1,50 @@
+# ESM-2
+
+## 模型介绍
+
+[ESM-2](https://www.biorxiv.org/content/10.1101/2022.07.20.500902v1.full.pdf?utm_campaign=M2D2%20Community%20Round-Up&utm_medium=email&utm_source=Revue%20newsletter)是蛋白质语言模型。
+
+ESM-2系列模型是迄今为止训练的最大的蛋白质语言模型,其参数仅比最近开发的最大文本模型少一个数量级。
+ESM-2是一个基于transformer的语言模型,它使用注意力机制来学习输入序列中氨基酸对之间的相互作用。
+ESM-2比以前的模型有了实质性的改进,即使在150M参数下,ESM-2也比在650M参数下的ESM-1生成语言模型捕获了更准确的结构图像。
+
+相对于上一代模型ESM-1b,ESM-2改进了模型体系结构和训练参数,并增加了计算资源和数据。添加相对位置嵌入后,模型可以推广到任意长度的序列。这些修改使模型效果更好。
+具有150M参数的ESM-2模型比具有650M参数的ESM-1b模型性能更好。在结构预测基准上,它的表现也优于其他最近的蛋白质语言模型。这种性能的提高与在大型语言建模领域建立的缩放定律一致。
+15B参数ESM-2模型仅比已经训练过的最大最先进的文本语言模型小一个数量级,如Chinchilla(700亿参数)、GPT3和OPT-175B(都是1750亿参数)和PALM(5400亿参数)。
+
+ESM-2的预训练模型采样的数据集为UR50/D 2021_04。
+
+当前PipeLine中ESM-2只提供推理,暂不支持训练。
+
+## 如何使用
+
+ESM-2运行样例代码如下所示。
+
+```bash
+import numpy as np
+from mindsponge.pipeline import PipeLine
+
+pipeline = PipeLine('ESM2')
+pipeline.initialize('config')
+pipeline.model.from_pretrained()
+data = [("protein3", "KAISQ")]
+kwargs = {"return_contacts": True}
+_, _, _, contacts = pipeline.predict(data, **kwargs)
+contacts = contacts.asnumpy()
+tokens_len = pipeline.dataset.batch_lens[0]
+attention_contacts = contacts[0]
+matrix = attention_contacts[: tokens_len, : tokens_len]
+print("contact map", matrix)
+```
+
+## 引用
+
+```bash
+@article{lin2022language,
+ title={Language models of protein sequences at the scale of evolution enable accurate structure prediction},
+ author={Lin, Zeming and Akin, Halil and Rao, Roshan and Hie, Brian and Zhu, Zhongkai and Lu, Wenting and Smetanin, Nikita and dos Santos Costa, Allan and Fazel-Zarandi, Maryam and Sercu, Tom and Candido, Sal and others},
+ journal={bioRxiv},
+ year={2022},
+ publisher={Cold Spring Harbor Laboratory}
+}
+```
diff --git a/MindSPONGE/applications/model cards/ESM-IF1.md b/MindSPONGE/applications/model cards/ESM-IF1.md
new file mode 100644
index 0000000000000000000000000000000000000000..cb7c50eeb35a91c21924e8bba77522845c60c263
--- /dev/null
+++ b/MindSPONGE/applications/model cards/ESM-IF1.md
@@ -0,0 +1,78 @@
+# ESM-IF1
+
+## 模型介绍
+
+ESM-IF1为反向折叠模型,通过蛋白质骨架的原子坐标预测蛋白质序列。论文将逆向折叠定义为sequence-to-sequence问题,并使用自回归的编码解码架构进行建模,论文中模型的任务是从蛋白质骨架坐标中预测出它的蛋白质序列,流程如下图所示:
+
+
+
+设计出具有所需特性的蛋白质氨基酸序列,称为从头蛋白质设计,是生物工程的核心挑战。近期,业界出现了一系列基于神经网络的生成式方法,用于预测给定结构对应的序列或直接对序列建模。但是由于实验确定的蛋白质结构数量较少,已确定的结构对已知蛋白质序列空间的覆盖率不足0.1%,深度学习方法因此受到了很大的限制。
+
+在ESM-IF1中,模型使用通过AlphaFold2对UniRef50中1200万蛋白质序列进行预测所获得的结构作为自己的训练数据,将训练集的规模增加了三个数量级。
+
+论文中使用Geometric Vector Perceptron(GVP)层来学习向量特征的等变变换和标量特征的不变变换。论文共提供了三种模型:GVP-GNN、GVP-GNN-large,以及由GVP-GNN结构编码器和通用Transformer组成的混合模型。GVP-GNN和GVP-Transformer都满足以下特征:给定输入坐标的旋转平移变换T,输出关于这些变换保持不变。GVP可参考[论文](https://arxiv.org/abs/2009.01411),GVP模型结构如下:
+
+
+
+## 数据集
+
+ESM-IF1所使用数据集主要为CATH v4.3数据集,包含蛋白质序列信息以及骨架坐标。
+
+| 文件名 | 大小 | 描述 | Data URL |
+| ----------------- | ----- | ----------------------------------- | ------------------------------------------------------------ |
+| `chain_set.jsonl` | 512MB | CATH v4.3数据集蛋白质骨架坐标和序列 | [下载链接](https://dl.fbaipublicfiles.com/fair-esm/data/cath4.3_topologysplit_202206/chain_set.jsonl) |
+| `splits.json` | 197kB | CATH v4.3数据集划分 | [下载链接](https://dl.fbaipublicfiles.com/fair-esm/data/cath4.3_topologysplit_202206/splits.json) |
+
+## 如何使用
+
+ESM-IF1支持使用单个pdb文件进行推理,推理过程如下:
+
+```bash
+from mindsponge import PipeLine
+from mindsponge.common.config_load import load_config
+
+pipe = PipeLine(name="ESM_IF1")
+pipe.set_device_id(0)
+
+# 第一次使用时未获取config文件,执行如下指令模型可自动下载config文件,后续使用可手动修改所需内容
+# from mindsponge.pipeline.pipeline import download_config
+# conf = download_config(pipe.config["sampling"], pipe.config_path + "sampling.yaml")
+# pipe.initialize(conf=conf)
+
+pipe.initialize(key="sampling")
+pipe.model.from_pretrained()
+res = pipe.predict(data={YOUR_PDB_PATH})
+print(res)
+```
+
+## 训练过程
+
+以论文中所提供训练集进行训练,模型训练方法如下:
+
+```bash
+from mindsponge import PipeLine
+
+pipe = PipeLine(name="ESM_IF1")
+pipe.set_device_id(0)
+pipe.initialize("training")
+pipe.train({YOUR_DATA_PATH}+"train_chain_set.jsonl", num_epochs=1)
+```
+
+## 引用
+
+```bash
+@InProceedings{pmlr-v162-hsu22a,
+ title = {Learning inverse folding from millions of predicted structures},
+ author = {Hsu, Chloe and Verkuil, Robert and Liu, Jason and Lin, Zeming and Hie, Brian and Sercu, Tom and Lerer, Adam and Rives, Alexander},
+ booktitle = {Proceedings of the 39th International Conference on Machine Learning},
+ pages = {8946--8970},
+ year = {2022},
+ editor = {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan},
+ volume = {162},
+ series = {Proceedings of Machine Learning Research},
+ month = {17--23 Jul},
+ publisher = {PMLR},
+ pdf = {https://proceedings.mlr.press/v162/hsu22a/hsu22a.pdf},
+ url = {https://proceedings.mlr.press/v162/hsu22a.html},
+}
+```
diff --git a/MindSPONGE/applications/model cards/GROVER.MD b/MindSPONGE/applications/model cards/GROVER.MD
new file mode 100644
index 0000000000000000000000000000000000000000..1270a8d9da92ac43ec01beb53e53cbcd2f6eae0d
--- /dev/null
+++ b/MindSPONGE/applications/model cards/GROVER.MD
@@ -0,0 +1,142 @@
+# GROVER
+
+## 模型介绍
+
+GROVER是一个自监督预训练GNN模型,能够对小分子进行编码,可以将消息传递网络集成到Transformer风格的架构中,从大量的未标记的分子数据中学习到丰富的分子结构和语义信息。
+
+GROVER由两个模块组成,节点transformer和边transformer,两者有类似的结构。节点GTransformer结构如下图所示:
+
+
+
+GROVER提出了两个预训练自监督任务,一个是在节点/边层级的上下文属性预测任务,另一个是图级别的motif预测任务。
+
+
+
+分子指纹是一种分子的抽象表征,它将分子编码为一系列比特向量,有助于进行两个化合物之间相似性的比较,Open Babel,RDkit等多种软件均可生成分子指纹,用于后续的虚拟筛选。通过多个自监督任务的训练,GROVER也能够生成原子的特征向量以及小分子的分子指纹,完成分子指纹生成这一下游任务。
+
+## 数据集
+
+数据集可从[此处](https://openi.pcl.ac.cn/dangwv/grover_local)下载,数据集由单个csv文件储存,单条数据为小分子的SMILES式。
+
+```bash
+.
+└─exampledata
+ ├─pretune # 预训练数据集目录
+ | └─tryout.csv
+ └─finetune # 下游数据集目录
+ ├─bbbp.csv # .csv文件为smiles分子式和对应标签的文件
+ ├─clintox.csv
+ ├─bace.csv
+ ├─tox21.csv
+ ├─toxcast.csv
+ ├─freesolv.csv
+ ├─esoll.csv
+ ├─lipo.csv
+ ├─qm7.csv
+ └─qm8.csv
+```
+
+- 分类任务数据集: BBBP | SIDER | ClinTox | BACE | Tox21 | ToxCast
+
+- 回归任务数据集: FreeSolv | ESOL | Lipo | QM7 | QM8
+
+## 使用限制
+
+当前GROVER模型只提供了预训练模型的checkpoint。如进行推理任务,需先针对推理任务完成fine-tune获取所需checkpoint后再进行推理。
+
+## 如何使用
+
+GROVER支持使用单个数据进行推理任务,分类和回归任务的选择通过修改config文件来完成。模型输入为csv文件,将SMILES式存放在csv文件中即可。
+
+```bash
+from mindsponge import PipeLine
+from mindsponge.common.config_load import load_config
+
+pipe = PipeLine(name="Grover")
+pipe.set_device_id(0)
+
+# 第一次使用时未获取config文件,执行如下指令模型可自动下载config文件,后续使用可手动修改所需内容
+# from mindsponge.pipeline.pipeline import download_config
+# conf = download_config(pipe.config["eval"], pipe.config_path + "eval.yaml")
+
+conf = load_config({YOUR_CONFIG_PATH}+"eval.yaml")
+conf.dataset_type = "classification"
+# conf.dataset_type = "regression"
+pipe.initialize(conf=conf)
+pipe.model.from_pretrained()
+data_path = {YOUR_DATA_PATH}+"data.csv"
+result = pipe.predict(data_path)
+print(result)
+```
+
+GROVER同时支持单条数据生成分子指纹,数据输入方式与模型推理相同,具体样例代码如下所示:
+
+```bash
+from mindsponge import PipeLine
+
+data_path = {YOUR_DATA_PATH}+"data.csv"
+pipe = PipeLine(name="Grover")
+pipe.set_device_id(0)
+pipe.initialize(key='gen')
+pipe.model.from_pretrained()
+result = pipe.predict(data_path)
+```
+
+## 训练过程
+
+GROVER共有两种训练模式,第一种为预训练,使用tryout.csv中所包含数据集进行训练,训练样例代码如下所示:
+
+```bash
+from mindsponge import PipeLine
+from mindsponge.common.config_load import load_config
+from mindsponge.pipeline.models.grover.grover_dataset import GroverDataSet
+
+data_path = {YOUR_DATA_PATH} + "tryout.csv"
+config = load_config("./pretrain.yaml")
+dataset = GroverDataSet(config)
+dataset.set_training_data_src(data_path)
+a = dataset.create_pretrain_dataset()
+pipe = PipeLine(name = "Grover")
+pipe.initialize(key=None, config_path="pretrain.yaml",
+ atom_vocab_size=dataset.config.atom_vocab_size, bond_vocab_size=dataset.config.bond_vocab_size,
+ fg_size=dataset.config.fg_size, steps_per_epoch=a.get_dataset_size())
+pipe.train(data_path, num_epochs = 10)
+```
+
+第二种为模型微调,微调任务分为分类任务和回归任务。通过修改config中的dataset_type可以选择任务类型,训练样例代码如下所示:
+
+```bash
+from mindsponge import PipeLine
+from mindsponge.common.config_load import load_config
+from mindsponge.pipeline.models.grover.grover_dataset import GroverDataSet
+
+# 第一次使用时未获取config文件,执行如下指令模型可自动下载config文件,后续使用可手动修改所需内容
+# from mindsponge.pipeline.pipeline import download_config
+# conf = download_config(pipe.config["finetune"], pipe.config_path + "finetune.yaml")
+
+conf = load_config({YOUR_CONFIG_PATH}+"finetune.yaml")
+conf.dataset_type = "classification"
+# conf.dataset_type = "regression"
+dataset = GroverDataSet(conf)
+dataset.set_training_data_src({YOUR_DATA_PATH}+'bbbp.csv')
+a = dataset.create_grover_dataset()
+pipe = PipeLine(name = "Grover")
+pipe.set_device_id(0)
+pipe.initialize(key=None, conf=conf,
+ steps_per_epoch=a.get_dataset_size(), features_dim=dataset.config.features_dim,
+ output_size=dataset.config.output_size)
+pipe.model.from_pretrained()
+pipe.train({YOUR_DATA_PATH}+'bbbp.csv', num_epochs=10)
+```
+
+## 引用
+
+```bash
+@article{rong2020self,
+ title={Self-supervised graph transformer on large-scale molecular data},
+ author={Rong, Yu and Bian, Yatao and Xu, Tingyang and Xie, Weiyang and Wei, Ying and Huang, Wenbing and Huang, Junzhou},
+ journal={Advances in Neural Information Processing Systems},
+ volume={33},
+ pages={12559--12571},
+ year={2020}
+}
+```
diff --git a/MindSPONGE/applications/model cards/GraphDTA.MD b/MindSPONGE/applications/model cards/GraphDTA.MD
new file mode 100644
index 0000000000000000000000000000000000000000..59d83c219309dae55e089542d0a99151c1138c35
--- /dev/null
+++ b/MindSPONGE/applications/model cards/GraphDTA.MD
@@ -0,0 +1,88 @@
+# GraphDTA
+
+## 模型介绍
+
+随着信息技术和生物、化学的快速发展,使用计算机来辅助药物开发的计算机辅助药物设计(Computer-Aided Drug Design, CADD)流程开始被业界广泛接受并投入使用。在CADD流程中,针对一个已知的致病蛋白,需要对十亿级规模小分子库中的每一个小分子与该致病蛋白的对接构象进行计算预测,得到计算层面两者结合的最佳构象,并对其进行结合能计算,按照结合能对每一个小分子打分,最后按照得分高低筛选出与该蛋白亲和性高的小分子,再进行下一步的临床试验。
+
+但是随着小分子数量的增多、小分子数据库的增大,先进行化合物和蛋白质的对接构象采样,再对构象进行打分这一流程显得略为冗杂,而且构象采样也会消耗大量的算力和时间。因此,通过学习部分已有小分子和蛋白质的对接亲和性,来预测分子库中其他小分子的亲和性,这一思路应运而生,业界也慢慢涌现出许多根据未对接的小分子和蛋白质来预测两者亲和性的模型。
+
+GraphDTA(drug-target affinity)就是一个蛋白质-配体亲和性预测模型。模型结构如下图所示:
+
+
+
+GraphDTA以小分子SMILES式与蛋白质氨基酸序列作为输入,使用rdkit读取小分子的二维结构(分子图)信息后,利用GNN网络提取小分子的特征向量,使用三层CNN提取蛋白质的特征向量,最后根据所得特征向量预测两者间的亲和性。
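+
+下面给出“使用rdkit读取SMILES并构建分子图”这一步的简化示意(并非GraphDTA的实际特征工程,示例分子为假设的输入):
+
+```python
+# 示意代码:用rdkit将SMILES解析为分子图(原子列表 + 邻接矩阵),供GNN做消息传递
+import numpy as np
+from rdkit import Chem
+
+smiles = "CC(=O)Oc1ccccc1C(=O)O"                        # 假设的小分子(阿司匹林)
+mol = Chem.MolFromSmiles(smiles)
+atoms = [atom.GetSymbol() for atom in mol.GetAtoms()]   # 图的节点:原子
+adj = Chem.GetAdjacencyMatrix(mol)                      # 图的边:邻接矩阵
+print(len(atoms), np.array(adj).shape)
+```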
+
+## 使用限制
+
+该模型依赖于MindSpore Graph Learning,在运行该模型前需进行该库的安装。MindSpore Graph Learning的安装可参考[官网文档说明](https://www.mindspore.cn/graphlearning/docs/zh-CN/master/index.html)。
+
+原论文中共提供了四种不同的GNN网络对小分子进行编码并分析其结果,本PipeLine集成了其中表现最好的GCN网络模型。
+
+## 数据集
+
+GraphDTA选择了DeepDTA中所使用的davis和kiba数据集,其中davis数据集中共有442个蛋白质序列和68个小分子的SMILES式,以及之间两两结合的Kd/Ki,即亲和性label,共有30056个数据。而kiba数据集中共有229个蛋白质序列和2111个小分子SMILES式,label为KIBA score,共有118254个数据。
+
+## 如何使用
+
+GraphDTA支持输入蛋白质的氨基酸序列与小分子SMILES式的数据对进行推理,从而预测两者间的亲和性。
+
+预测时需输入一个csv文件,文件内格式如下:
+
+|compound_iso_smiles|target_sequence|
+| --- | --- |
+|SMILES|Fasta|
+
+```bash
+from mindsponge import PipeLine
+from mindspore_gl.nn import GNNCell
+
+GNNCell.disable_display()
+pipe = PipeLine(name="GraphDTA")
+pipe.set_device_id(0)
+pipe.initialize("inference")
+pipe.model.from_pretrained()
+res = pipe.predict({YOUR_csv_PATH})
+print(res)
+```
+
+## 训练过程
+
+GraphDTA训练支持使用蛋白质的氨基酸序列与小分子SMILES式作为输入进行训练。
+
+训练同样需要输入一个csv文件,文件内格式如下:
+
+|compound_iso_smiles|target_sequence|affinity|
+| --- | --- | --- |
+| SMILES1 | Fasta1 | label1 |
+| SMILES2 | Fasta2 | label2 |
+| SMILES3 | Fasta3 | label3 |
+| ... | ... | ... |
+
+```bash
+from mindsponge import PipeLine
+from mindspore_gl.nn import GNNCell
+
+GNNCell.disable_display()
+pipe = PipeLine(name="GraphDTA")
+pipe.set_device_id(0)
+pipe.initialize(key="train")
+pipe.train({YOUR_csv_PATH}, num_epochs=1)
+```
+
+## 引用
+
+```bash
+@article{10.1093/bioinformatics/btaa921,
+ author = {Nguyen, Thin and Le, Hang and Quinn, Thomas P and Nguyen, Tri and Le, Thuc Duy and Venkatesh, Svetha},
+ title = "{GraphDTA: predicting drug–target binding affinity with graph neural networks}",
+ journal = {Bioinformatics},
+ volume = {37},
+ number = {8},
+ pages = {1140-1147},
+ year = {2020},
+ month = {10},
+ issn = {1367-4803},
+ doi = {10.1093/bioinformatics/btaa921},
+ url = {https://doi.org/10.1093/bioinformatics/btaa921},
+}
+```
diff --git a/MindSPONGE/applications/model cards/MEGAProtein.md b/MindSPONGE/applications/model cards/MEGAProtein.md
new file mode 100644
index 0000000000000000000000000000000000000000..8ce7f5db31dde2d8da51abb3424a0c5d91fcd5ab
--- /dev/null
+++ b/MindSPONGE/applications/model cards/MEGAProtein.md
@@ -0,0 +1,328 @@
+# MEGAProtein
+
+## 模型介绍
+
+MEGA-Protein是由高毅勤老师团队与MindSpore科学计算团队共同开发的蛋白质结构预测工具。克服了[AlphaFold2](https://www.nature.com/articles/s41586-021-03819-2)数据前处理耗时过长,缺少MSA时预测精度不准确,缺乏通用评估结构质量工具的问题。
+
+MEGA-Protein主要由蛋白质结构预测工具MEGA-Fold,MSA生成修正工具MEGA-EvoGen,蛋白质结构评分工具MEGA-Assessment三部分共同组成。
+
+### 蛋白质结构预测工具MEGA-Fold
+
+MEGA-Fold的网络模型部分与AlphaFold2相同,在数据预处理的多序列对比环节采用了[MMseqs2](https://www.biorxiv.org/content/10.1101/2021.08.15.456425v1.full.pdf)进行序列检索,相比于原版端到端速度提升2-3倍;同时借助内存复用大幅提升内存利用效率,同硬件条件下支持更长序列的推理(基于32GB内存的Ascend910运行时最长支持3072长度序列推理)。
+
+### MSA生成修正工具MEGA-EvoGen
+
+MSA生成修正工具MEGA-EvoGen能显著提升单序列的预测速度,并且能够在MSA较少(few shot)甚至没有MSA(zero-shot,即单序列)的情况下,帮助MEGA-Fold/AlphaFold2等模型维持甚至提高推理精度,突破了在「孤儿序列」、高异变序列和人造蛋白等MSA匮乏场景下无法做出准确预测的限制。MEGA-EvoGen的模型构架图如下。
+
+
+
+### 蛋白质结构评分工具MEGA-Assessment
+
+蛋白质结构评分工具MEGA-Assessment可以评价蛋白质结构每个残基的准确性以及残基-残基之间的距离误差,同时可以基于评价结果对蛋白结构作出进一步的优化。
+
+## 数据集
+
+MEGA-Fold训练数据集为[PSP蛋白质结构数据集](http://ftp.cbi.pku.edu.cn/psp/),数据集大小为1.6TB,解压后为25TB。
+MEGA-Assessment训练数据集为PSP数据集中的[PSP lite](http://ftp.cbi.pku.edu.cn/psp/psp_lite/)。
+
+```shell
+.
+└─PSP
+ ├─true_structure_dataset
+ | ├─pkl
+ | | └─256 pkl packages
+ | ├─pdb
+ | | └─256 pdb packages
+ | └─true_structure_data_statistics_729.json
+ ├─distillation_dataset
+ | ├─pkl
+ | | └─256 pkl packages
+ | ├─pdb
+ | | └─256 pdb packages
+ | └─distill_data_statistics_729.json
+ ├─new_validation_dataset
+ | ├─pkl.tar.gz
+ | ├─pdb.tar.gz
+ | └─nv_data_statistics.json
+ └─psp_lite
+ ├─true_structure_mini
+ | ├─pkl
+ | | └─32 pkl packages
+ | └─true_structure_mini.pdb.tar.gz
+ └─distillation_mini
+ ├─pkl
+ | └─32 pkl packages
+ └─distillation_mini.pdb.tar.gz
+```
+
+## 如何使用
+
+mindsponge.PipeLine中分别提供了三个模型的推理流程,在使用时,可将氨基酸序列输入MEGA-EvoGen中获取该蛋白的特征,将特征输入MEGA-Fold中进行蛋白质的结构预测,最后将蛋白质特征与结构信息共同输入MEGA-Assessment中进行打分评估。以CASP14蛋白质T1082-D1为例,整体推理流程如下所示。
+
+```bash
+import numpy as np
+import mindspore as ms
+from mindsponge import PipeLine
+
+ms.set_context(mode=ms.GRAPH_MODE)
+
+# MEGA-EvoGen推理获取蛋白质生成MSA后的特征
+fasta = "GYDKDLCEWSMTADQTEVETQIEADIMNIVKRDRPEMKAEVQKQLKSGGVMQYNYVLYCDKNFNNKNIIAEVVGE"
+msa_generator = PipeLine(name="MEGAEvoGen")
+msa_generator.set_device_id(0)
+msa_generator.initialize(key="evogen_predict_256")
+msa_generator.model.from_pretrained()
+msa_feature = msa_generator.predict(fasta)
+
+# MEGA-Fold推理获取蛋白质结构信息
+fold_prediction = PipeLine(name="MEGAFold")
+fold_prediction.set_device_id(0)
+fold_prediction.initialize(key="predict_256")
+fold_prediction.model.from_pretrained()
+final_atom_positions, final_atom_mask, aatype, _, _ = fold_prediction.model.predict(msa_feature)
+
+# MEGA-Assessment对蛋白质结构进行评价
+protein_assessment = PipeLine(name = "MEGAAssessment")
+protein_assessment.set_device_id(0)
+protein_assessment.initialize("predict_256")
+protein_assessment.model.from_pretrained()
+msa_feature['decoy_aatype'] = np.pad(aatype, (0, 256 - aatype.shape[0]))
+msa_feature['decoy_atom_positions'] = np.pad(final_atom_positions, ((0, 256 - final_atom_positions.shape[0]), (0, 0), (0, 0)))
+msa_feature['decoy_atom_mask'] = np.pad(final_atom_mask, ((0, 256 - final_atom_mask.shape[0]), (0, 0)))
+
+res = protein_assessment.predict(msa_feature)
+print("score is:", np.mean(res))
+```
+
+### 使用场景
+
+MEGAEvoGen,MEGAFold,MEGAAssessment均支持多种不同场景下的不同输入格式进行推理,详情如下:
+
+为方便说明使用场景,以下样例默认已下载好config文件,并通过修改其中参数的方式选择不同场景,用户使用时也可按照如下方式执行;若未提前下载config文件,可按样例中注释所示替换相应代码,在下载config的同时完成修改与加载。
+
+- MEGAEvoGen
+
+ - 序列作为输入,样例如下:
+
+ ```bash
+ from mindsponge import PipeLine
+ from mindsponge.common.config_load import load_config
+
+ fasta = "GYDKDLCEWSMTADQTEVETQIEADIMNIVKRDRPEMKAEVQKQLKSGGVMQYNYVLYCDKNFNNKNIIAEVVGE"
+ msa_generator = PipeLine(name="MEGAEvoGen")
+
+ # 未获取config文件时,执行如下两行命令即可自动下载config文件,之后所有案例同理替换,仅提供代码样例,不做相同说明
+ # from mindsponge.pipeline.pipeline import download_config
+ # conf = download_config(msa_generator.config["evogen_predict_256"], msa_generator.config_path + "evogen_predict_256.yaml")
+
+ conf = load_config({YOUR_CONFIG_PATH})
+ conf.use_pkl = False
+ msa_generator.initialize(conf=conf)
+ msa_generator.model.from_pretrained()
+ feature = msa_generator.predict(fasta)
+ print(feature.shape, feature.dtype)
+ ```
+
+ - 序列搜索MSA后所获得的pickle文件作为输入,样例如下:
+
+ ```bash
+ import pickle
+    from mindsponge import PipeLine
+    from mindsponge.common.config_load import load_config
+
+ with open({YOUR_PICKLE_PATH}, "rb") as f:
+ data = pickle.load(f)
+ msa_generator = PipeLine(name="MEGAEvoGen")
+
+ # from mindsponge.pipeline.pipeline import download_config
+ # conf = download_config(msa_generator.config["evogen_predict_256"], msa_generator.config_path + "evogen_predict_256.yaml")
+
+ conf = load_config({YOUR_CONFIG_PATH})
+ conf.use_pkl = True
+ msa_generator.initialize(conf=conf)
+ msa_generator.model.from_pretrained()
+ feature, mask = msa_generator.predict(data)
+ print(feature.shape, feature.dtype)
+ ```
+
+- MEGAFold
+
+ - 使用搜索后所得pickle文件作为输入,样例如下:
+
+ ```bash
+ import pickle
+ import mindspore as ms
+ from mindsponge import PipeLine
+ ms.set_context(mode=ms.GRAPH_MODE)
+
+ with open({YOUR_PICKLE_PATH}, "rb") as f:
+ feature = pickle.load(f)
+ fold_prediction = PipeLine(name="MEGAFold")
+ fold_prediction.set_device_id(0)
+ fold_prediction.initialize(key="predict_256")
+ fold_prediction.model.from_pretrained()
+ protein_structure = fold_prediction.predict(feature)
+ print(protein_structure)
+ ```
+
+    - 使用单序列进行MSA检索并推理(完整流程),其中MSA检索的配置请参考下文“使用限制”一节中的说明。检索完成后使用pickle文件进行推理,与上一场景完全相同,此处不再重复提供代码。
+
+ - 后续MEGAFold会支持将蛋白质序列与template作为输入,不提供MSA进行推理的场景。
+
+- MEGAAssessment
+
+    - MEGAAssessment仅支持单一场景:以序列检索所得的pickle文件和MEGAFold推理所得的pdb文件共同作为输入,样例如下:
+
+ ```bash
+ import pickle
+ import numpy as np
+ from mindspore import context
+ from mindsponge import PipeLine
+ from mindsponge.common.config_load import load_config
+ from mindsponge.common.protein import from_pdb_string
+
+ protein_assessment = PipeLine(name="MEGAAssessment")
+ protein_assessment.set_device_id(0)
+
+ # from mindsponge.pipeline.pipeline import download_config
+ # conf = download_config(protein_assessment.config["predict_256"], protein_assessment.config_path + "predict_256.yaml")
+
+ conf = load_config({YOUR_CONFIG_PATH})
+ protein_assessment.initialize(key="predict_256")
+ protein_assessment.model.from_pretrained()
+
+ # load raw feature
+ f = open({YOUR_PICKLE_PATH}, "rb")
+ raw_feature = pickle.load(f)
+ f.close()
+
+ # load decoy pdb
+ with open({YOUR_PDB_PATH}, 'r') as f:
+ decoy_prot_pdb = from_pdb_string(f.read())
+ f.close()
+ raw_feature['decoy_aatype'] = decoy_prot_pdb.aatype
+ raw_feature['decoy_atom_positions'] = decoy_prot_pdb.atom_positions
+ raw_feature['decoy_atom_mask'] = decoy_prot_pdb.atom_mask
+
+ res = protein_assessment.predict(raw_feature)
+ print("score is:", np.mean(res))
+ ```
+
+## 训练过程
+
+Pipeline中提供了MEGAFold和MEGAAssessment两个模型的训练代码。MEGAFold的训练集为PSP数据集,MEGAAssessment的训练集为PSP lite数据集。
+
+MEGAFold的训练样例代码如下所示:
+
+```bash
+import mindspore as ms
+from mindsponge import PipeLine
+
+ms.set_context(mode=ms.GRAPH_MODE)
+
+pipe = PipeLine(name="MEGAFold")
+pipe.set_device_id(0)
+pipe.initialize(key="initial_training")
+pipe.train({YOUR_DATA_PATH}, num_epochs=1)
+```
+
+MEGAAssessment的训练样例代码如下所示:
+
+```bash
+from mindsponge import PipeLine
+
+pipe = PipeLine(name="MEGAAssessment")
+pipe.set_device_id(0)
+pipe.initialize(key="initial_training")
+pipe.train({YOUR_DATA_PATH}, num_epochs=1)
+```
+
+## 使用限制
+
+在使用模型前,推荐进行数据库检索配置。
+
+- 配置MSA检索
+
+ 首先安装MSA搜索工具**MMseqs2**,该工具的安装和使用可以参考[MMseqs2 User Guide](https://mmseqs.com/latest/userguide.pdf),安装完成后运行以下命令配置环境变量:
+
+ ``` shell
+ export PATH=$(pwd)/mmseqs/bin/:$PATH
+ ```
+
+ 然后下载MSA所需数据库:
+
+ - [uniref30_2103](http://wwwuser.gwdg.de/~compbiol/colabfold/uniref30_2103.tar.gz):压缩包68G,解压后375G
+ - [colabfold_envdb_202108](http://wwwuser.gwdg.de/~compbiol/colabfold/colabfold_envdb_202108.tar.gz):压缩包110G,解压后949G
+
+ 下载完成后需解压并使用MMseqs2处理数据库,数据处理参考[colabfold](http://colabfold.mmseqs.com),主要命令如下:
+
+ ``` bash
+ tar xzvf "uniref30_2103.tar.gz"
+ mmseqs tsv2exprofiledb "uniref30_2103" "uniref30_2103_db"
+ mmseqs createindex "uniref30_2103_db" tmp1 --remove-tmp-files 1
+
+ tar xzvf "colabfold_envdb_202108.tar.gz"
+ mmseqs tsv2exprofiledb "colabfold_envdb_202108" "colabfold_envdb_202108_db"
+ mmseqs createindex "colabfold_envdb_202108_db" tmp2 --remove-tmp-files 1
+ ```
+
+- 配置MSA检索加速(可选)
+
+ 下载MSA加速缓存工具:
+ - [FoldMSA.tar.gz](https://download.mindspore.cn/mindscience/mindsponge/msa_tools/Fold_MSA.tar.gz):按照工具内说明操作进行MSA搜索加速。
+
+- 配置模板检索
+
+ 首先安装模板搜索工具[**HHsearch**](https://github.com/soedinglab/hh-suite)
+ 与[**kalign**](https://msa.sbc.su.se/downloads/kalign/current.tar.gz),然后下载模板检索所需数据库:
+
+ - [pdb70](http://wwwuser.gwdg.de/~compbiol/data/hhsuite/databases/hhsuite_dbs/old-releases/pdb70_from_mmcif_200401.tar.gz):压缩包19G,解压后56G
+ - [mmcif database](https://ftp.rcsb.org/pub/pdb/data/structures/divided/mmCIF/): 零散压缩文件~50G,解压后~200G,需使用爬虫脚本下载,下载后需解压所有mmcif文件放在同一个文件夹内。
+ - [obsolete_pdbs](http://ftp.wwpdb.org/pub/pdb/data/status/obsolete.dat):140K
+
+ *数据库下载网站均为国外网站,下载速度可能较慢,需要自行配置VPN*。
+
+ - 配置数据库检索config
+
+ 根据数据库安装情况配置`config/data.yaml`中数据库搜索的相关配置`database_search`,相关参数含义如下:
+
+ ```bash
+ # configuration for template search
+ hhsearch_binary_path HHsearch可执行文件路径
+ kalign_binary_path kalign可执行文件路径
+ pdb70_database_path pdb70文件夹路径
+ mmcif_dir mmcif文件夹路径
+ obsolete_pdbs_path PDB IDs的映射文件路径
+ max_template_date 模板搜索截止时间,该时间点之后的模板会被过滤掉,默认值"2100-01-01"
+ # configuration for Multiple Sequence Alignment
+ mmseqs_binary MMseqs2可执行文件路径
+ uniref30_path uniref30文件夹路径
+ database_envdb_dir colabfold_envdb_202108文件夹路径
+ a3m_result_path mmseqs2检索结果(msa)的保存路径,默认值"./a3m_result/"
+ ```
+
+## 引用
+
+- 结构预测工具MEGA-Fold与训练数据集PSP
+
+```bash
+@misc{https://doi.org/10.48550/arxiv.2206.12240,
+doi = {10.48550/ARXIV.2206.12240},
+url = {https://arxiv.org/abs/2206.12240},
+author = {Liu, Sirui and Zhang, Jun and Chu, Haotian and Wang, Min and Xue, Boxin and Ni, Ningxi and Yu, Jialiang and Xie, Yuhao and Chen, Zhenyu and Chen, Mengyun and Liu, Yuan and Patra, Piya and Xu, Fan and Chen, Jie and Wang, Zidong and Yang, Lijiang and Yu, Fan and Chen, Lei and Gao, Yi Qin},
+title = {PSP: Million-level Protein Sequence Dataset for Protein Structure Prediction},
+publisher = {arXiv},
+year = {2022},
+copyright = {Creative Commons Attribution 4.0 International}
+}
+```
+
+- MSA生成修正工具MEGA-EvoGen
+
+```bash
+@article{zhang2022few,
+ title={Few-shot learning of accurate folding landscape for protein structure prediction},
+ author={Zhang, Jun and Liu, Sirui and Chen, Mengyun and Chu, Haotian and Wang, Min and Wang, Zidong and Yu, Jialiang and Ni, Ningxi and Yu, Fan and Chen, Diqing and others},
+ journal={arXiv preprint arXiv:2208.09652},
+ year={2022}
+ }
+```
\ No newline at end of file
diff --git a/MindSPONGE/applications/model cards/MGBERT.MD b/MindSPONGE/applications/model cards/MGBERT.MD
new file mode 100644
index 0000000000000000000000000000000000000000..0a415927ab0f8b9937ad3abe595a9a1f9a6c01ba
--- /dev/null
+++ b/MindSPONGE/applications/model cards/MGBERT.MD
@@ -0,0 +1,108 @@
+# MG-BERT
+
+## 模型介绍
+
+MG-BERT(Molecular-Graph-BERT)是分子性质预测模型。MG-BERT将GNN集成到BERT中,提出了分子图BERT(MG-BERT)模型,利用非监督学习的方式学习小分子的原子表示,之后再将该预训练模型应用到分子属性预测下游任务中。
+
+图神经网络存在一个缺陷,在学习时容易变得过平滑,即无论特征矩阵的初始状态如何,多次卷积后,同一连通分量内所有节点的特征都趋于一致。在模型中将GNN与BERT结合,克服了过平滑的问题,使得模型具备提取深层特征的能力。并且MG-BERT可以通过BERT的attention机制关注目标性质相关的原子和子结构。MG-BERT的网络架构如下所示:
+
+
+
+MG-BERT与BERT存在一些不同点:
+
+1. 在embedding layer,word token变成了atom token,由于分子中的原子没有顺序关联,因此不需要指定位置信息。
+
+2. global attention变为了local attention based on chemical bonds。在自然语言句子中,一个单词可能与其他任何单词相关,但是在分子中,原子主要通过化学键与相邻原子相关联,因此,在模型中使用邻接矩阵来控制分子内的信息交换(见本列表后的示意代码)。
+
+3. 为每个分子添加了可以连接到所有原子的超节点(supernode),一方面,超节点可以与其他节点交换信息,在一定程度上解决了长距离依赖问题。另一方面,超节点可以被视为最终分子表示,用于解决下游任务。
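+
+针对上面第2点,下面用一个极简的numpy示意说明“用邻接矩阵把全局注意力约束为基于化学键的局部注意力”的做法(并非MG-BERT的实际实现,矩阵形状与数值均为假设):
+
+```python
+# 示意代码:邻接矩阵中为0的位置(非相邻原子)在注意力打分中被屏蔽
+import numpy as np
+
+def masked_attention(q, k, v, adj):
+    """q, k, v: (n_atoms, d);adj: (n_atoms, n_atoms) 的0/1邻接矩阵(可含自连接与超节点)。"""
+    scores = q @ k.T / np.sqrt(q.shape[-1])
+    scores = np.where(adj > 0, scores, -1e9)            # 屏蔽非相邻原子间的注意力
+    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
+    weights /= weights.sum(axis=-1, keepdims=True)
+    return weights @ v
+```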
+
+MG-BERT的训练共分为两个阶段:
+
+1. 预训练(pre-train)阶段,使用非监督学习让MG-BERT学习小分子的原子表示,输入无标签的小分子SMILES式,对输入分子中的原子进行mask并且进行预测。
+
+2. 微调(fine-tune)阶段,基于第一阶段的预训练模型,输入带有标签的小分子数据进行训练,预测小分子的性质,完成分类和回归任务。
+
+## 数据集
+
+预训练阶段所使用数据集为ChEMBL数据库,从中随机抽取数据库中170万个化合物作为训练数据,数据集可从[此处](https://ftp.ebi.ac.uk/pub/databases/chembl/ChEMBLdb/releases/chembl_31/)下载。
+
+微调阶段使用从ADMETlab和MoleculeNet中收集的16个数据集对MG-BERT进行训练和评估,其中包含8个用于回归任务的数据集和8个用于分类任务的数据集。将数据集按8:1:1的比例分为训练、验证和测试数据集,并按SMILES长度进行分层抽样,使数据集的划分更加均匀。回归和分类所用数据集可从[此处](https://gitee.com/lytgogogo/project_data/tree/master/data)下载。
+
+## 如何使用
+
+MG-BERT支持分类任务和回归任务推理,输入为txt文件,在文件中存放小分子SMILES式即可。
+
+分类任务推理代码如下所示:
+
+```bash
+from mindsponge import PipeLine
+
+data = {YOUR_TXT_PATH}
+pipe = PipeLine(name="MGBert")
+pipe.set_device_id(0)
+pipe.initialize("mgbert_classification")
+pipe.model.from_pretrained()
+pipe.predict(data)
+```
+
+回归任务推理代码如下所示:
+
+```bash
+from mindsponge import PipeLine
+
+data = {YOUR_TXT_PATH}
+pipe = PipeLine(name="MGBert")
+pipe.set_device_id(0)
+pipe.initialize("mgbert_regression")
+pipe.model.from_pretrained()
+pipe.predict(data)
+```
+
+## 训练过程
+
+MG-BERT共提供了3种训练,预训练,分类任务的微调,回归任务的微调。
+
+预训练代码如下所示:
+
+```bash
+from mindsponge import PipeLine
+
+pipe = PipeLine(name="MGBert")
+pipe.initialize("mgbert_pretrain")
+result = pipe.train({YOUR_DATA_PATH}+"chembl_31_chemreps.txt",num_epochs=1)
+```
+
+分类任务微调代码如下所示:
+
+```bash
+from mindsponge import PipeLine
+
+pipe = PipeLine(name="MGBert")
+pipe.initialize("mgbert_classification")
+result = pipe.train({YOUR_DATA_PATH}+'Pgp_sub.txt',num_epochs=1)
+```
+
+回归任务微调代码如下所示:
+
+```bash
+from mindsponge import PipeLine
+
+pipe = PipeLine(name="MGBert")
+pipe.initialize("mgbert_regression")
+result = pipe.train({YOUR_DATA_PATH}+'logS.txt',num_epochs=1)
+```
+
+## 引用
+
+```bash
+@article{zhang2021mg,
+ title={MG-BERT: leveraging unsupervised atomic representation learning for molecular property prediction},
+ author={Zhang, Xiao-Chen and Wu, Cheng-Kun and Yang, Zhi-Jiang and Wu, Zhen-Xing and Yi, Jia-Cai and Hsieh, Chang-Yu and Hou, Ting-Jun and Cao, Dong-Sheng},
+ journal={Briefings in bioinformatics},
+ volume={22},
+ number={6},
+ pages={bbab152},
+ year={2021},
+ publisher={Oxford University Press}
+}
+```
diff --git a/MindSPONGE/applications/model cards/ProteinMPNN.MD b/MindSPONGE/applications/model cards/ProteinMPNN.MD
new file mode 100644
index 0000000000000000000000000000000000000000..8df9d9d804922ded7b14fd54634b7e8b23a5b574
--- /dev/null
+++ b/MindSPONGE/applications/model cards/ProteinMPNN.MD
@@ -0,0 +1,59 @@
+# ProteinMPNN
+
+## 模型介绍
+
+ProteinMPNN是一种基于深度学习的蛋白质序列设计方法,给定一个蛋白质的backbone结构,预测能折叠成该结构的氨基酸序列。ProteinMPNN利用了不同位置的氨基酸序列可以在单链或多链之间耦合这一特性,使其能够应用于当前的蛋白质设计,广泛适用于单体、环状低聚物、蛋白质纳米颗粒等设计任务。
+
+ProteinMPNN的原始网络架构源于[Structure Transformer](https://proceedings.neurips.cc/paper/2019/file/f3a4ff4839c56a5f460c88cce3666a2b-Paper.pdf),并且在其基础上做了进一步的修改。
+
+Structure Transformer的构架如下所示:
+
+
+
+ProteinMPNN主要做了如下几点改进:
+
+1. 新增全原子距离信息,引入N, CA, CB, C, O五种骨架原子间的距离信息并进行采样,将距离编码为16维特征,由于每一个残基取5个原子,对应的边特征shape为 $5 \times 5 \times 16$ (见本列表后的示意代码)。
+
+2. MPNN在decoder中引入随机位点的解码机制(随机打乱解码顺序),而非像传统语言模型那样按N->C顺序进行序列设计。
+
+3. 在同源多聚体的任务中,同一位点的氨基酸可以被偶联并同时解码。
+
+4. 训练时引入主链的高斯噪音,增强模型的泛化能力,同时让模型更加关注整体的拓扑结构信息而不是过度关注局部环境的信息。
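+
+针对上面第1点,下面给出一个简化示意:把一对残基各5个骨架原子之间的距离用16个RBF基函数编码,得到形状为 (5, 5, 16) 的边特征(并非ProteinMPNN的实际实现,RBF个数、距离范围等参数均为假设):
+
+```python
+# 示意代码:残基对的骨架原子距离 -> RBF编码的边特征
+import numpy as np
+
+def rbf_edge_features(coords_i, coords_j, n_rbf=16, d_min=0.0, d_max=20.0):
+    """coords_i, coords_j: (5, 3) 的N, CA, CB, C, O坐标;返回 (5, 5, n_rbf)。"""
+    dist = np.linalg.norm(coords_i[:, None, :] - coords_j[None, :, :], axis=-1)   # (5, 5)
+    centers = np.linspace(d_min, d_max, n_rbf)                                    # RBF中心
+    sigma = (d_max - d_min) / n_rbf
+    return np.exp(-((dist[..., None] - centers) ** 2) / (2 * sigma ** 2))         # (5, 5, 16)
+
+feat = rbf_edge_features(np.random.rand(5, 3) * 10.0, np.random.rand(5, 3) * 10.0)
+print(feat.shape)
+```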
+
+ProteinMPNN使用了3层encoder,3层decoder和128层隐藏层,ProteinMPNN结构如下所示:
+
+
+
+## 使用限制
+
+ProteinMPNN暂未提供大批量原始数据处理脚本,因此模型暂不支持训练。但是支持使用单个蛋白质PDB文件作为输入进行推理。
+
+## 如何使用
+
+本模型中支持使用单个蛋白质PDB文件作为输入进行氨基酸序列预测,样例代码如下所示:
+
+```bash
+from mindsponge import PipeLine
+
+pipe = PipeLine(name = "Proteinmpnn")
+pipe.set_device_id(0)
+pipe.initialize("proteinmpnnpredict")
+pipe.model.from_pretrained()
+res = pipe.predict({YOUR_PDB_PATH})
+print(res)
+```
+
+## 引用
+
+```bash
+@article{dauparas2022robust,
+ title={Robust deep learning--based protein sequence design using ProteinMPNN},
+ author={Dauparas, Justas and Anishchenko, Ivan and Bennett, Nathaniel and Bai, Hua and Ragotte, Robert J and Milles, Lukas F and Wicky, Basile IM and Courbet, Alexis and de Haas, Rob J and Bethel, Neville and others},
+ journal={Science},
+ volume={378},
+ number={6615},
+ pages={49--56},
+ year={2022},
+ publisher={American Association for the Advancement of Science}
+}
+```
diff --git a/MindSPONGE/applications/model cards/UFold.md b/MindSPONGE/applications/model cards/UFold.md
new file mode 100644
index 0000000000000000000000000000000000000000..ee1ebaeeebd327d8514155a61d7c904d61fc75ca
--- /dev/null
+++ b/MindSPONGE/applications/model cards/UFold.md
@@ -0,0 +1,91 @@
+# UFold
+
+## 模型介绍
+
+对于许多RNA分子来说,二级结构对于RNA的正确功能至关重要。从核苷酸序列预测RNA二级结构是基因组学中一个长期存在的问题,但随着时间的推移,预测性能已经达到了稳定水平。传统的RNA二级结构预测算法主要基于热力学模型,通过自由能最小化,这强加了很强的先验假设,而且运行速度很慢。UFold作为一种基于深度学习的方法,用于RNA二级结构预测,直接根据注释数据和碱基配对规则进行训练。UFold提出了一种新的RNA序列的类图像表示方法,它可以通过完全卷积网络(FCNs)进行有效的处理。
+
+模型的输入是对序列One-Hot编码的四个碱基通道两两取外积生成的,这产生了16个通道;然后,将表示碱基配对概率的附加通道与这16个通道的序列表示拼接,一起作为模型的输入。UFold模型是U-Net的一个变体,它将17通道张量作为输入,并通过连续的卷积和最大池化运算转换数据。
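+
+下面是上述17通道输入构造方式的一个简化示意(并非UFold源码,序列与配对概率均为假设的占位输入):
+
+```python
+# 示意代码:由One-Hot序列的两两外积得到16个通道,再拼接1个配对概率通道
+import numpy as np
+
+seq = "AUGGCU"                       # 假设的RNA序列
+bases = "AUCG"
+length = len(seq)
+one_hot = np.zeros((4, length))
+for i, b in enumerate(seq):
+    one_hot[bases.index(b), i] = 1.0
+
+outer = np.einsum("il,jm->ijlm", one_hot, one_hot).reshape(16, length, length)  # 16个通道
+pair_prob = np.zeros((1, length, length))        # 配对概率通道(此处用零矩阵占位)
+inputs = np.concatenate([outer, pair_prob], axis=0)
+print(inputs.shape)                              # (17, length, length)
+```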
+
+## 数据集
+
+UFold使用了多个基准数据集:
+
+- RNAStralign,包含来自8个RNA家族的30 451个独特序列;
+
+- ArchiveII,包含来自10个RNA家族的3975个序列,是最广泛使用的RNA结构预测性能基准数据集;
+
+- bpRNA-1m,包含来自2588个家族的102 318个序列,是可用的最全面的RNA结构数据集之一;
+
+- bpRNA new,源自Rfam 14.2,包含来自1500个新RNA家族的序列。
+
+原始数据集ArchiveII,bpnew,TS0,TS1,TS2,TS3为bpseq格式数据文件,在使用前需要将原始bpseq格式数据文件处理成pickle文件,处理后的数据文件可从[网盘](https://pan.baidu.com/s/1y2EWQlZJhJfqi_UyUnEicw?pwd=o5k2)中下载,下载后将数据置于data文件夹下。
+
+## 如何使用
+
+推理可支持输入单个RNA的ct文件,也可以以文件夹作为路径输入,文件夹中存储所有ct文件。当输入为单个ct文件时,推理结果为单个预测结果,当输入为文件夹时,推理结果为文件夹下所有ct文件的推理结果,顺序与ct文件首字母排序顺序相同。
+
+```bash
+import collections
+from mindsponge import PipeLine
+from mindsponge.common.config_load import load_config
+from mindsponge.pipeline.models.ufold.ufold_data import RNASSDataGenerator
+
+RNA_SS_data = collections.namedtuple('RNA_SS_data', 'seq ss_label length name pairs')
+pipe = PipeLine(name = "UFold")
+data_src = {YOUR_DATA_PATH}
+
+# 第一次使用时未获取config文件,执行如下指令模型可自动下载config文件,后续使用可手动修改所需内容
+# from mindsponge.pipeline.pipeline import download_config
+# conf = download_config(pipe.config["ufold_config"], pipe.config_path + "ufold_config.yaml")
+
+config_path = {YOUR_CONFIG_PATH}
+conf = load_config(config_path)
+conf.is_training = False
+# 可选test_ckpt为'ArchiveII', 'bpnew', 'TS0', 'TS1', 'TS2', 'TS3', 'All'
+conf.test_ckpt = 'All'
+pipe.set_device_id(0)
+pipe.initialize(conf=conf)
+pipe.model.from_pretrained()
+data = {/YOUR_DATA_PATH/xxx.ct}
+# data = {/YOUR_DATA_PATH/}
+result = pipe.predict(data)
+print(result)
+```
+
+## 训练过程
+
+```python
+import collections
+from mindsponge import PipeLine
+from mindsponge.common.config_load import load_config
+from mindsponge.pipeline.models.ufold.ufold_data import RNASSDataGenerator
+
+RNA_SS_data = collections.namedtuple('RNA_SS_data', 'seq ss_label length name pairs')
+pipe = PipeLine(name = "UFold")
+config_path = {YOUR_CONFIG_PATH}
+conf = load_config(config_path)
+conf.is_training = True
+# 训练集可为['ArchiveII', 'bpnew', 'TS0', 'TS1', 'TS2', 'TS3']中的一个或多个
+conf.train_files = ['TS0']
+pipe.set_device_id(1)
+pipe.initialize(conf=conf)
+pipe.train({YOUR_DATA_PATH}, num_epochs = 10)
+```
+
+## 引用
+
+```bibtex
+@article{10.1093/nar/gkab1074,
+ author = {Fu, Laiyi and Cao, Yingxin and Wu, Jie and Peng, Qinke and Nie, Qing and Xie, Xiaohui},
+ title = "{UFold: fast and accurate RNA secondary structure prediction with deep learning}",
+ journal = {Nucleic Acids Research},
+ volume = {50},
+ number = {3},
+ pages = {e14-e14},
+ year = {2021},
+ month = {11},
+ issn = {0305-1048},
+ doi = {10.1093/nar/gkab1074},
+ url = {https://doi.org/10.1093/nar/gkab1074},
+}
+```
diff --git a/MindSPONGE/applications/model cards/afmultimer.md b/MindSPONGE/applications/model cards/afmultimer.md
new file mode 100644
index 0000000000000000000000000000000000000000..59a4bf4171a1adf5d869894fd2ffcaa8430c61c6
--- /dev/null
+++ b/MindSPONGE/applications/model cards/afmultimer.md
@@ -0,0 +1,76 @@
+# AlphaFold Multimer
+
+## 模型介绍
+
+[AlphaFold Multimer](https://www.biorxiv.org/content/10.1101/2021.10.04.463034v2.abstract)是蛋白质复合物结构预测模型。
+
+在AlphaFold-Multimer出现之前,人们主要通过添加连接子(Linker)将多条链连接、或在残基编号间插入间隔等方法,将多链“伪装”成单链输入AlphaFold 2进行结构模拟。AlphaFold-Multimer在保留了AlphaFold 2算法一些重要特性的基础上,做了部分调整以满足复合物结合界面结构预测的特殊需要:
+
+- 训练数据同样来自于PDB。出于对计算和存储消耗的考虑,AlphaFold-Multimer沿用了AlphaFold 2将蛋白质截断为384个氨基酸长度的输入方式,但在截取方法上力求扩大链覆盖度、增加截断片段多样性的同时,兼顾结合面与非结合面的截取。
+
+- 沿用AlphaFold 2特色的FAPE(Frame Aligned Point Error)结构打分函数,并将链内氨基酸对原子间的截断距离设置为10埃,链间不设置固定的截断距离。
+
+同时AlphaFold-Multimer也具备独特的计算方法,其中最具创新性的模块在于多链特征提取和对称置换,使其超越了包括基于AlphaFold 2改造方案在内的既有预测方法。
+
+- 修改损失函数。同源多聚物将被视作同样的序列多次出现,充分考虑对称置换的情形,如预测一个A2B形式的复合物时,充分考虑亚单位所有的排列组合形式,包括两个A单位间置换的同等有效性等,避免正确的预测被罚分的情况,保证模型训练的有效性。
+
+- 链间共进化。AlphaFold-Multimer采用了以往文献报道的方法,根据遗传距离或序列相似性判断种属关系,用同种属的序列进行配对以期获得同源结构及MSA信息并供给网络。
+
+- 对位置编码(positional encoding)进行了重新设计:利用f_asym_id对不同的链进行编码,原有的残基相对位置编码d_ij只在同一条链(同一f_asym_id)内计算;利用f_entity_id进行实体编码(区分哪种蛋白质),并利用f_sym_id区分同一实体的多个拷贝(简化示意见下方代码)。
+
+- 修改了模型置信度,提高了链间氨基酸残基间作用的权重,从而提升结合界面的精确度。
+
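+下面给出多链相对位置编码的一个简化示意(非官方实现,截断阈值等参数为假设值,仅用于说明f_asym_id、f_entity_id、f_sym_id各自的作用):
+
+```python
+import numpy as np
+
+def multimer_rel_pos(residue_index, asym_id, entity_id, sym_id, max_offset=32):
+    """返回三组两两特征:链内相对位置(不同链记为额外一档)、是否同一实体、拷贝编号差。"""
+    same_chain = asym_id[:, None] == asym_id[None, :]
+    offset = residue_index[:, None] - residue_index[None, :]
+    # 链内相对位置截断到[-max_offset, max_offset],不同链统一记为2*max_offset+1这一额外档位
+    rel_pos = np.clip(offset + max_offset, 0, 2 * max_offset)
+    rel_pos = np.where(same_chain, rel_pos, 2 * max_offset + 1)
+    same_entity = (entity_id[:, None] == entity_id[None, :]).astype(np.int32)
+    rel_sym = np.clip(sym_id[:, None] - sym_id[None, :] + 2, 0, 4)   # 截断范围为假设值
+    return rel_pos, same_entity, rel_sym
+
+# A2B型复合物的简化示例:链A的两个拷贝(同一实体)加链B(另一实体),每条链3个残基
+residue_index = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
+asym_id = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])     # f_asym_id:每条链唯一
+entity_id = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])   # f_entity_id:哪种蛋白质
+sym_id = np.array([0, 0, 0, 1, 1, 1, 0, 0, 0])      # f_sym_id:同一实体内第几个拷贝
+
+rel_pos, same_entity, rel_sym = multimer_rel_pos(residue_index, asym_id, entity_id, sym_id)
+print(rel_pos.shape, same_entity.shape, rel_sym.shape)   # (9, 9) x 3
+```
+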
+当前PipeLine中AlphaFold Multimer只提供推理,暂不支持训练。
+
+## 如何使用
+
+以6T36蛋白为例,Multimer运行样例代码如下所示。
+
+```python
+import os
+import stat
+import pickle
+from mindsponge import PipeLine
+from mindsponge.common.protein import to_pdb_v2, from_prediction_v2
+
+cmd = "wget https://download.mindspore.cn/mindscience/mindsponge/Multimer/examples/6T36.pkl"
+os.system(cmd)
+
+pipe = PipeLine(name="Multimer")
+pipe.set_device_id(0)
+pipe.initialize("predict_256")
+pipe.model.from_pretrained()
+with open("./6T36.pkl", "rb") as f:
+ raw_feature = pickle.load(f)
+final_atom_positions, final_atom_mask, confidence, b_factors = pipe.predict(raw_feature)
+unrelaxed_protein = from_prediction_v2(final_atom_positions,
+ final_atom_mask,
+ raw_feature["aatype"],
+ raw_feature["residue_index"],
+ b_factors,
+ raw_feature["asym_id"],
+ False)
+pdb_file = to_pdb_v2(unrelaxed_protein)
+os.makedirs('./result/', exist_ok=True)
+os_flags = os.O_RDWR | os.O_CREAT
+os_modes = stat.S_IRWXU
+pdb_path = './result/unrelaxed_6T36.pdb'
+with os.fdopen(os.open(pdb_path, os_flags, os_modes), 'w') as fout:
+ fout.write(pdb_file)
+print("confidence:", confidence)
+```
+
+## 引用
+
+```bibtex
+@article {AlphaFold-Multimer2021,
+ author = {Evans, Richard and O{\textquoteright}Neill, Michael and Pritzel, Alexander and Antropova, Natasha and Senior, Andrew and Green, Tim and {\v{Z}}{\'\i}dek, Augustin and Bates, Russ and Blackwell, Sam and Yim, Jason and Ronneberger, Olaf and Bodenstein, Sebastian and Zielinski, Michal and Bridgland, Alex and Potapenko, Anna and Cowie, Andrew and Tunyasuvunakool, Kathryn and Jain, Rishub and Clancy, Ellen and Kohli, Pushmeet and Jumper, John and Hassabis, Demis},
+ journal = {bioRxiv},
+ title = {Protein complex prediction with AlphaFold-Multimer},
+ year = {2021},
+ elocation-id = {2021.10.04.463034},
+ doi = {10.1101/2021.10.04.463034},
+ URL = {https://www.biorxiv.org/content/early/2021/10/04/2021.10.04.463034},
+ eprint = {https://www.biorxiv.org/content/early/2021/10/04/2021.10.04.463034.full.pdf},
+}
+```
diff --git a/MindSPONGE/applications/model cards/pafnucy.md b/MindSPONGE/applications/model cards/pafnucy.md
new file mode 100644
index 0000000000000000000000000000000000000000..87f1e9a0fc34bf8a574dec8bb23751c9a590e24b
--- /dev/null
+++ b/MindSPONGE/applications/model cards/pafnucy.md
@@ -0,0 +1,116 @@
+# Pafnucy
+
+## 模型介绍
+
+[Pafnucy](https://academic.oup.com/bioinformatics/article/34/21/3666/4994792)是一种用于预测蛋白质-配体复合物亲和性的深度卷积神经网络。使用三维网格表征复合物,在模型中使用3D卷积生成该表征的特征图,以相同的方式处理蛋白质和配体的原子。
+
+Pafnucy模型由卷积模块和全连接模块两部分组成,两部分使用不同类型的层间连接。每个卷积模块由一个3D卷积层和一个最大池化层构成,Pafnucy依次使用了三个分别带有64、128、256个过滤器的卷积模块,并将最后一个模块的输出展平后送入全连接模块。
+
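+下面给出“将原子坐标离散化到三维网格”的一个简化示意(非Pafnucy官方实现,盒子大小、分辨率与原子特征维度均为示例值,实际特征请以模型的数据处理脚本为准):
+
+```python
+import numpy as np
+
+def voxelize(coords, features, box_size=20.0, resolution=1.0):
+    """把以复合物几何中心为原点的原子坐标放入(G, G, G, C)的三维网格。"""
+    grid_dim = int(box_size / resolution) + 1
+    grid = np.zeros((grid_dim, grid_dim, grid_dim, features.shape[1]), dtype=np.float32)
+    # 平移到网格坐标并取最近的格点
+    idx = np.round((coords + box_size / 2) / resolution).astype(int)
+    inside = np.all((idx >= 0) & (idx < grid_dim), axis=1)    # 丢弃盒子之外的原子
+    for (i, j, k), feat in zip(idx[inside], features[inside]):
+        grid[i, j, k] += feat                                  # 同一格点的原子特征累加
+    return grid
+
+# 简化示例:10个原子,每个原子4维特征(如原子类型、部分电荷等)
+coords = np.random.uniform(-10, 10, size=(10, 3))
+features = np.random.rand(10, 4)
+print(voxelize(coords, features).shape)   # (21, 21, 21, 4)
+```
+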
+## 使用限制
+
+该模型依赖于软件Open Babel,在使用前需提前安装openbabel-3.1.1,并且使用pip install的方式安装Open Babel对应版本python包。
+
+Open Babel依赖于低版本python,所以安装前请确保 `python <= 3.7.16`。
+
+可从[源码下载地址](https://github.com/openbabel/openbabel/releases)获取openbabel软件源码压缩包用于后续解压编译安装,安装流程可参考:
+
+```bash
+tar xf openbabel-3.1.1.tar.bz2
+cd openbabel-3.1.1
+mkdir build
+cd build/
+cmake ..
+make
+sudo make install
+```
+
+可在终端使用如下指令验证Open Babel是否安装成功:
+
+```bash
+obabel --help
+```
+
+还可使用conda安装Open Babel软件,具体安装指令如下:
+
+```bash
+conda install -c openbabel openbabel
+```
+
+通过conda安装的Open Babel版本为2.4.1,不同版本的Open Babel使用的原子属性关键词不同,直接运行模型时会因关键词不匹配而报错。如需在该版本下运行,可将mindsponge.pipeline.models.pafnucy.pafnucy_data.py中
+
+```python
+self.NAMED_PROPS = ['hyb', 'heavydegree', 'heterodegree', 'partialcharge']
+```
+
+修改为
+
+```python
+self.NAMED_PROPS = ['hyb', 'heavyvalence', 'heterovalence', 'partialcharge']
+```
+
+即可。
+
+## 数据集
+
+模型所使用数据集为PDBBind v2016,数据集大小约为2.5G。
+
+- Index files of PDBbind
+- Protein-ligand complexes: The general set minus refined set
+- Protein-ligand complexes: The refined set
+- Protein-protein complexes
+- Ligand molecules in the general set (Mol2 format)
+- Ligand molecules in the general set (SDF format)
+- pdbbind_v2013_core_set.tar.gz
+
+### 数据集下载
+
+Pipeline中提供了数据集下载脚本,在训练时即可自动进行数据集的下载。
+
+## 如何使用
+
+```python
+import os
+from mindsponge import PipeLine
+from openbabel import pybel
+
+# 小分子为mol2文件,蛋白质为pdb文件或mol2文件
+pocket_path = {YOUR_POCKET_PATH}
+ligand_path = {YOUR_LIGAND_PATH}
+raw_data = [pocket_path, ligand_path]
+pipe = PipeLine(name = "pafnucy")
+pipe.set_device_id(0)
+pipe.initialize("config")
+pipe.model.from_pretrained()
+result = pipe.predict(raw_data)
+print(result)
+```
+
+## 训练过程
+
+训练只需向模型提供数据集所在路径,若该路径下不存在数据集,则模型会自动下载训练所需PDBBind数据集,之后进行训练。训练时需将config文件中的is_training修改为True。
+
+```python
+from mindsponge import PipeLine
+pipe = PipeLine(name = "pafnucy")
+pipe.set_device_id(0)
+pipe.initialize("config")
+pipe.train({YOUR_DATA_PATH}, num_epochs = 1)
+```
+
+## 引用
+
+```bibtex
+@article{10.1093/bioinformatics/bty374,
+ author = {Stepniewska-Dziubinska, Marta M and Zielenkiewicz, Piotr and Siedlecki, Pawel},
+ title = "{Development and evaluation of a deep learning model for protein–ligand binding affinity prediction}",
+ journal = {Bioinformatics},
+ volume = {34},
+ number = {21},
+ pages = {3666-3674},
+ year = {2018},
+ month = {05},
+ issn = {1367-4803},
+ doi = {10.1093/bioinformatics/bty374},
+ url = {https://doi.org/10.1093/bioinformatics/bty374},
+}
+```
diff --git a/MindSPONGE/applications/molecular_dynamics/protein_relaxation/protein_relax.py b/MindSPONGE/applications/molecular_dynamics/protein_relaxation/protein_relax.py
index 3348175372de4909da8924385a3b76b863e3f913..4f3427c412eae244b45303175b0b225f88768a03 100644
--- a/MindSPONGE/applications/molecular_dynamics/protein_relaxation/protein_relax.py
+++ b/MindSPONGE/applications/molecular_dynamics/protein_relaxation/protein_relax.py
@@ -19,7 +19,6 @@ $ python3 protein_relax.py -i examples/protein/case2.pdb -o examples/protein/cas
"""
import argparse
-import numpy as np
from mindspore import context, Tensor, nn
from mindspore import numpy as msnp
import mindspore as ms
@@ -28,9 +27,11 @@ from mindsponge import Sponge
from mindsponge import set_global_units
from mindsponge import Protein
from mindsponge import ForceField
-from mindsponge import SimulationCell
from mindsponge.callback import RunInfo
+from mindsponge.core import WithEnergyCell, WithForceCell, RunOneStepCell
from mindsponge.optimizer import SteepestDescent
+from mindsponge.sampling import MaskedDriven
+from mindsponge.partition import NeighbourList
from mindsponge.potential.bias import OscillatorBias
from mindsponge.system.modelling.pdb_generator import gen_pdb
@@ -51,7 +52,7 @@ if context.get_context("device_target") == "Ascend":
context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True,
graph_kernel_flags="--enable_cluster_ops=ReduceSum --reduce_fuse_depth=10")
else:
- context.set_context(mode=context.GRAPH_MODE, device_target="GPU", device_id=0)
+ context.set_context(mode=context.GRAPH_MODE, device_target="GPU", device_id=1, enable_graph_kernel=True)
def get_violation_loss(system):
@@ -91,16 +92,20 @@ def optimize_strategy(system, gds, loops, ads, adm, nonh_mask, mode=1):
energy = ForceField(system, "AMBER.FF14SB")
learning_rate = 1e-07
factor = 1.003
- opt = SteepestDescent(
- system.trainable_params(),
- learning_rate=learning_rate,
- factor=factor,
- nonh_mask=nonh_mask,
- )
+ dynamic_lr = nn.ExponentialDecayLR(learning_rate, factor, 1, is_stair=True)
+ opt = SteepestDescent(system.trainable_params(), dynamic_lr, max_shift=1.0)
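+    # Shared neighbour list feeds both wrappers below: WithEnergyCell computes the potential
+    # energy, WithForceCell applies the MaskedDriven modifier (nonh_mask) to the forces, and
+    # RunOneStepCell combines them with the optimizer into a single minimization step.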
+ neighbours = NeighbourList(system, cutoff=None, cast_fp16=True)
+ with_energy = WithEnergyCell(system, energy, neighbour_list=neighbours)
+ modifier = MaskedDriven(length_unit=with_energy.length_unit,
+ energy_unit=with_energy.energy_unit,
+ mask=nonh_mask)
+ with_force = WithForceCell(system, neighbour_list=neighbours, modifier=modifier)
+ one_step = RunOneStepCell(energy=with_energy, force=with_force, optimizer=opt)
+
for i, param in enumerate(opt.trainable_params()):
print(i, param.name, param.shape)
- md = Sponge(system, energy, opt)
+ md = Sponge(network=one_step)
run_info = RunInfo(1)
md.run(gds, callbacks=[run_info])
@@ -114,13 +119,14 @@ def optimize_strategy(system, gds, loops, ads, adm, nonh_mask, mode=1):
if mode in (1, 2):
energy.set_energy_scale([1, 1, 1, 1, 1, 1])
- simulation_network = SimulationCell(system, energy, bias=[harmonic_energy])
+ simulation_network = WithEnergyCell(system, energy, bias=[harmonic_energy], neighbour_list=neighbours)
for _ in range(adm):
opt = nn.Adam(system.trainable_params(), learning_rate=learning_rate)
+ one_step = RunOneStepCell(energy=simulation_network, optimizer=opt)
for i, param in enumerate(opt.trainable_params()):
print(i, param.name, param.shape)
- md = Sponge(simulation_network, optimizer=opt)
+ md = Sponge(network=one_step)
print(md.calc_energy())
run_info = RunInfo(1)
md.run(ads, callbacks=[run_info])
@@ -129,13 +135,14 @@ def optimize_strategy(system, gds, loops, ads, adm, nonh_mask, mode=1):
if mode in (1, 3):
energy.set_energy_scale([1, 1, 1, 0, 0, 0])
- simulation_network = SimulationCell(system, energy, bias=[harmonic_energy])
+ simulation_network = WithEnergyCell(system, energy, bias=[harmonic_energy], neighbour_list=neighbours)
for _ in range(adm):
opt = nn.Adam(system.trainable_params(), learning_rate=learning_rate)
+ one_step = RunOneStepCell(energy=simulation_network, optimizer=opt)
for i, param in enumerate(opt.trainable_params()):
print(i, param.name, param.shape)
- md = Sponge(simulation_network, optimizer=opt)
+ md = Sponge(network=one_step)
print(md.calc_energy())
run_info = RunInfo(1)
md.run(ads, callbacks=[run_info])
@@ -150,9 +157,7 @@ def main():
ms.set_seed(seed)
set_global_units("A", "kcal/mol")
system = Protein(pdb=pdb_name, rebuild_hydrogen=True)
- nonh_mask = Tensor(
- np.where(system.atomic_number[0] > 1, 0, 1)[None, :, None], ms.int32
- )
+ nonh_mask = system.heavy_atom_mask
gds, loops, ads, adm = 100, 3, 200, 2
system = optimize_strategy(system, gds, loops, ads, adm, nonh_mask, mode=1)
diff --git a/MindSPONGE/applications/research/FAAST/A.png b/MindSPONGE/applications/research/FAAST/A.png
new file mode 100644
index 0000000000000000000000000000000000000000..22e0fe7e034ad2446d80cca0978634cd62c1cc71
Binary files /dev/null and b/MindSPONGE/applications/research/FAAST/A.png differ
diff --git a/MindSPONGE/applications/research/FAAST/B.png b/MindSPONGE/applications/research/FAAST/B.png
new file mode 100644
index 0000000000000000000000000000000000000000..81239fa9ade1e62d25d378b1c41311f01d6ddf96
Binary files /dev/null and b/MindSPONGE/applications/research/FAAST/B.png differ
diff --git a/MindSPONGE/applications/research/FAAST/README.md b/MindSPONGE/applications/research/FAAST/README.md
index 9303f90711bad47ebf0e0c536bc31061fe710cd2..45ea9b310c3b5b52e2bfea66f56fb057fcb88fd8 100644
--- a/MindSPONGE/applications/research/FAAST/README.md
+++ b/MindSPONGE/applications/research/FAAST/README.md
@@ -1,9 +1,360 @@
-# FAAST
+ENGLISH|[简体中文](README_CN.md)
+# FAAST and RASP
+NMR experiments can detect in situ structures and dynamic interactions, but the NMR assignment process requires expertise and is time-consuming, which limits its applicability. Deep learning algorithms have been employed to aid experimental data analysis. In this work, we developed the RASP (Restraints Assisted Structure Predictor) model, which enhances structure prediction with restraints. Based on the Evoformer and structure module architecture of AlphaFold, the model predicts structures from a sequence and a flexible number of input restraints, and it can evaluate the consistency between the predicted structure and the imposed restraints. On top of this model, we built an iterative NMR NOESY peak assignment pipeline named FAAST to accelerate the assignment of NOESY restraints and to obtain high-quality structure ensembles. The RASP model and the FAAST pipeline not only leverage experimental restraints to improve model prediction, but also facilitate and expedite experimental data analysis.
+
+Please check our paper ["Assisting and Accelerating NMR Assignment with Restrained Structure Prediction"](https://www.biorxiv.org/content/10.1101/2023.04.14.536890v1) for detailed information.
+
+A simple test case is provided through Google Colab [FAAST_DEMO](https://colab.research.google.com/drive/1uaki0Ui1Y_gqVW7KSo838aOhXHSM3PTe?usp=sharing) for a quick start. The RASP model and the FAAST pipeline were developed on the MindSpore+Ascend platform; they run on both Ascend and GPU, with better performance on Ascend.
+
+Cite us
+
+```bibtex
+@article{Liu2023AssistingAA,
+title={Assisting and Accelerating NMR Assignment with Restrained Structure Prediction},
+author={Sirui Liu and Haotian Chu and Yuantao Xie and Fangming Wu and Ningxi Ni and Chenghao Wang and Fangjing Mu and Jiachen Wei and Jun Zhang and Mengyun Chen and Junbin Li and F. Yu and Hui Fu and Shenlin Wang and Changlin Tian and Zidong Wang and Yi Qin Gao},
+journal={bioRxiv},
+year={2023}
+}
+```
+
+Contents
+
+
+
+- [FAAST and RASP](#faast-and-rasp)
+ - [Environment](#environment)
+ - [Hardware and Framework](#hardware-and-framework)
+ - [Installation and requirements](#installation-and-requirements)
+ - [Code Contents](#code-contents)
+ - [Example](#example)
+ - [RASP inference](#rasp-inference)
+ - [FAAST pipeline](#faast-pipeline)
+ - [**Examples of command**](#examples-of-command)
+ - [**Examples of runlog**](#examples-of-runlog)
+ - [**Comparison of Results**](#comparison-of-results)
+ - [Reference](#reference)
+ - [Acknowledgement](#acknowledgement)
+
+
+
+
+
+## Environment
+
+### Hardware and Framework
+
+RASP and FAAST are based on the computational biology library [MindSPONGE]() and the MindSpore framework. MindSpore version 2.0 or later is required; see the [MindSpore installation page](https://www.mindspore.cn/install) for installation instructions. This toolkit runs on either Ascend 910 or a GPU with more than 16 GB of RAM. Mixed precision is used by default on Ascend 910, while full precision is used on GPU.
+
+### Installation and requirements
+
+- MindSpore installation:
+ Download mindspore package:
+
+ PlatForm | Link
+ ----------|----------
+ Ascend-910 ARM |
+ Ascend-910 x86 |
+ GPU x86 |
+
+    Installation on Ascend requires the hardware driver package Ascend Data Center Solution 23.0.RC1 ([Install Guide]()).
+    Installation on GPU requires Nvidia CUDA version 11.1-11.8; please refer to the [Installation Link]().
+    After downloading the wheel package, install it with `pip install mindspore*.whl`.
+
+- Install MindSPONGE:
+ Download Mindscience :
+
+ ``` shell
+ git clone https://gitee.com/mindspore/mindscience.git
+ ```
+
+ compile MindSPONGE wheel package:
+
+ ``` shell
+ cd ./mindscience/MindSPONGE/
+ ```
+
+ if on Ascend910 platform
+
+ ``` shell
+ bash build.sh -e ascend -j 8
+ ```
+
+ if on GPU platform
+
+ ``` shell
+ bash build.sh -e gpu -j 8
+ ```
+
+ Install wheel package
+
+ ``` shell
+ pip install ./output/mindsponge*.whl
+ ```
+
+- Install other requirements:
+ FAAST and RASP rely on searching and alignment tools such as HHSearch and Kalign. A one-click installation script is provided for easy use.
+
+ ``` shell
+ cd ./mindscience/MindSPONGE/applications/research/FAAST
+ sh ./install.sh
+ ```
+
+## Code Contents
+
+Code Contents
+
+```bash
+├── FAAST
+ ├── main.py // FAAST Main Script
+ ├── run_rasp.py // RASP Main Script
+ ├── README.md // FAAST README English Version
+ ├── README_CN.md // FAAST README Chinese Version
+ ├── extract_restraints.py // Extract The Restraint Sample File From The pdb
+ ├── search.py // Call Mmseqs Online Search For MSA And Templates
+ ├── install.sh // Install The Shell Script For Install Dependencies
+ ├── assign
+ ├── assign.py //Iterative Assignment
+ ├── init_assign.py //Initial Assignment
+ ├── commons
+ ├── analysis.py //Result Analysis Tool
+ ├── nmr_hydrogen_equivariance.txt //Hydrogen Equivalence
+ ├── res_constants.py //Hydrogen Equivalence Dictionary
+ ├── config
+ ├── data.yaml //Data Process Config
+ ├── model.yaml //Model Config
+ ├── data
+ ├── dataset.py // Dataset
+ ├── hhsearch.py // HHsearch Tool
+ ├── kalign.py // Kalign Tool
+ ├── msa_query.py // MSA Processing Tool
+ ├── parsers.py // Mmcif Processing
+ ├── preprocess.py // Data Processing
+ ├── protein_feature.py // MSA and Template Feature Search and Integration script
+ ├── templates.py // Template Processing Scripts
+ ├── utils.py // Common Func Scripts
+ ├── model
+ ├── fold.py // Main RASP Model
+ ├── module
+ ├── evoformer.py // Evoformer Module
+ ├── fold_wrapcell.py // Wrapper
+ ├── head.py // Heads
+ ├── structure.py // Structure Module
+ ├── template_embedding.py // Template Module
+ ├── nmr_relax
+ ├── model
+ ├── structure_violation.py //Calculate Whether The Structure Conforms To Physical Laws
+ ├── utils.py //Common Tools For Relaxtion
+ ├── relax
+ ├── amber_minimize.py //OpenMM Amber Force Field Minimization
+ ├── cleanup.py //Cleanup
+ ├── relax.py //Relaxation Scripts
+ ├── utils.py //Common tools for OpenMM
+```
+
+
+
+## Example
+
+### RASP inference
+
+The trained RASP model weights can be downloaded at [RASP.ckpt]() and
+the sample files can be downloaded from [example](). Run the following command to start inference.
+
+```bash
+Usage: python run_rasp.py --run_platform PLATFORM --use_pkl False --restraints_path RESTRAINTS_PATH
+ --input_path INPUT_FILE_PATH --checkpoint_file CHECKPOINT_FILE --use_template True --use_custom False
+ --a3m_path A3M_PATH --template_path TEMPLATE_PATH
+
+option:
+--restraints_path    Path of the restraint information folder. The restraint information for each sample is stored in a separate txt file with the same name as the sequence/MSA.
+--run_platform       The operating platform, can be Ascend or GPU.
+--input_path         The path of the input .fasta/.pkl files. Multiple samples with different file names can be stored in one folder.
+--checkpoint_file    Path of the model weight file.
+--use_pkl            Whether to use pkl files as input, default: False.
+--use_template       Whether to use templates, default: True.
+--use_custom         Whether to use custom MSA and template information provided by the user, default: False.
+--a3m_path           The folder for MSAs saved after searching, or for custom MSAs provided by the user.
+--template_path      The folder for templates saved after searching, or for custom templates provided by the user.
+```
+
+RASP supports three types of input.
+
+1. Protein fasta sequence as input: the MSA and templates are generated by an online mmseqs search. Set use_pkl and use_custom to False, and pass a3m_path and template_path to save the search results.
+2. MSA and template files as input: the MSA should be in .a3m format and the templates in .cif format. They can be searched with other tools or provided from prior knowledge (e.g. known structure templates). Set use_pkl to False and use_custom to True, and pass the user-provided MSA and template paths via a3m_path and template_path.
+3. Pre-processed features (.pkl file) as input: set use_pkl to True; a3m_path and template_path are not required. For how the pre-processed features are generated, refer to `./data/protein_feature.py:monomer_feature_generate`, which combines the features of the input sequence with the searched MSA and template information; the corresponding pkl file is saved in `./pkl_file/` after each run of the RASP model.
+
+**Restraint information**
+
+The model requires restraint information as input. The restraint information is an (N, 2) text file describing which residue pairs are close in 3-dimensional space, e.g. `[[1,2],... [2,10]]`. Restraint information can come from various sources, including Nuclear Magnetic Resonance (NMR), Cross-Linking Mass Spectrometry (XL-MS) and so on. A sample script is provided to extract restraint information from a pdb file, as shown below.
+
+```bash
+Usage: python extract_restraints.py --pdb_path PDB_PATH --output_file OUTPUT_FILE
+option:
+--pdb_path A pdb file that provides restraint information.
+--output_file Location of the file to output restraint information.
+```
+
+The following is an example restraint file. Each line contains the indices of one pair of residues that are close in space, separated by a space.
+
+``` log
+51 74
+46 60
+.. ..
+36 44
+70 46
+18 68
+```
+
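+A minimal sketch (not part of the shipped scripts) of loading such a restraint file into an (N, 2) integer array, assuming a purely numeric file in the format shown above:
+
+```python
+import numpy as np
+
+def load_restraints(path):
+    """Load a whitespace-separated restraint file into an (N, 2) array of residue indices."""
+    pairs = np.atleast_2d(np.loadtxt(path, dtype=int))   # keep (N, 2) even for a single line
+    assert pairs.shape[1] == 2, "each line must contain exactly two residue indices"
+    return pairs
+
+# restraints = load_restraints("restraints.txt")
+# print(restraints.shape)
+```
+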
+Predicted structures are stored in `./result/`.
+
+```log
+{confidence of predicted structrue :89.23, time :95.86,restraint recall :1.0}
+```
+
+
+
+
+
+For the multi-domain structure 6XMV, both AlphaFold and MEGA-Fold give inaccurate relative domain positions; with restraints, RASP is able to fix the inter-domain arrangement.
+
+### FAAST pipeline
+
+#### **Examples of command**
+
+The trained RASP model weights can be downloaded at [RASP.ckpt]() and
+the sample files can be downloaded from [example](). Run the following command to start the pipeline. To customize the pipeline, please check `assign_settings.py` for all settings.
+
+```bash
+Usage: python main.py --run_platform PLATFORM --use_pkl True --peak_and_cs_path PEAKLIST_PATH
+ --input_path INPUT_FILE_PATH --checkpoint_file CHECKPOINT_FILE --use_template True --use_custom False
+ --a3m_path A3M_PATH --template_path TEMPLATE_PATH
+
+option:
+--peak_and_cs_path   Path of the NOESY peak lists and chemical shift tables.
+--run_platform       The operating platform, can be Ascend or GPU.
+--input_path         The path of the input .fasta/.pkl files. Multiple samples with different file names can be stored in one folder.
+--checkpoint_file    Path of the model weight file.
+--use_pkl            Whether to use pkl files as input, default: False.
+--use_template       Whether to use templates, default: True.
+--use_custom         Whether to use custom MSA and template information provided by the user, default: False.
+--a3m_path           The folder for MSAs saved after searching, or for custom MSAs provided by the user.
+--template_path      The folder for templates saved after searching, or for custom templates provided by the user.
+```
+
+The input forms supported by this pipeline are the same as those of the RASP model. Unlike RASP, it does not require restraint information, but it does require a chemical shift table and NOESY peak lists. The chemical shift table and NOESY peak lists of each protein sequence should be stored in a separate folder; for the file organization, refer to the sample files.
+
+**NOESY peak list**: NOESY list files must be `.txt` files whose names begin with `noelist_`. Each NOESY list file contains four columns of data separated by spaces: the first column is the resonance frequency of the heavy atom, the second column is the resonance frequency of the hydrogen atom attached to the heavy atom, the third column is the resonance frequency of the other hydrogen atom, and the fourth column is the peak intensity (volume). If multiple NOESY spectra exist, store them in separate `.txt` files. Currently, only 3D-NOESY spectra are supported, as shown in the following example:
+
+``` log
+w1 w2 w3 volume
+119.73 4.584 8.102 7689.0
+119.73 3.058 8.102 1084.0
+119.73 3.057 8.102 1084.0
+119.73 7.005 8.102 317.0
+120.405 8.102 7.857 945.0
+......
+```
+
+**Chemical shift table**: The file is named `chemical_shift_aligned.txt` and contains five space-separated columns: atom name, atom type, chemical shift, residue number, and residue type. The residue numbering must be aligned with the protein sequence in input_path, as shown in the following example:
+
+``` log
+atom_name atom_type chem_shift res_idx res_type
+HA H 4.584 10 HIS
+HB2 H 3.058 10 HIS
+HB3 H 3.057 10 HIS
+HD2 H 7.005 10 HIS
+CA C 56.144 10 HIS
+......
+```
+
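+The following is an illustrative parsing sketch (not part of the shipped pipeline) for the two input tables described above, assuming each file has a single header line:
+
+```python
+import numpy as np
+
+def load_noesy_peaks(path):
+    """Read a noelist_*.txt file: columns w1, w2, w3, volume (the header line is skipped)."""
+    return np.loadtxt(path, skiprows=1, dtype=float)      # shape: (num_peaks, 4)
+
+def load_chemical_shifts(path):
+    """Read chemical_shift_aligned.txt into a list of per-atom records."""
+    records = []
+    with open(path) as f:
+        next(f)                                           # skip the header line
+        for line in f:
+            atom_name, atom_type, shift, res_idx, res_type = line.split()
+            records.append({"atom_name": atom_name, "atom_type": atom_type,
+                            "chem_shift": float(shift), "res_idx": int(res_idx),
+                            "res_type": res_type})
+    return records
+
+# peaks = load_noesy_peaks("noelist_example.txt")
+# shifts = load_chemical_shifts("chemical_shift_aligned.txt")
+```
+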
+#### **Examples of runlog**
+
+As shown in the following runlog example, FAAST runs multiple iterations, and each iteration runs the RASP model several times (repeats), using randomly sampled subsets of the restraint information to compute protein structures. The 0th iteration is run only once, and the resulting structure is used to filter out poor restraints obtained from the initial assignment. From the first iteration onward, RASP is repeated several times per iteration, and the generated structure ensemble is used for NOESY peak assignment; an evaluation of the assignment is output at the same time. For details of the pipeline, please refer to the Methods section of the paper.
+
+```log
+# Initial structure prediction without restraint
+>>>>>>>>>>>>>>>>>>>>>>Protein name: 5W9F, iteration: 0, repeat: 0, number of input restraint pair: 0, confidence: 84.58, input restraint recall: 1.0,
+Violation of structure after relaxation: 0.0
+
+# Initial assignment
+Initial assignment:
+C 2L33 noelist_17169_spectral_peak_list_2.txt 4644 4626
+N 2L33 noelist_17169_spectral_peak_list_1.txt 1366 1210
+Filtering restraint with given structure.
+
+......
+
+# Structure prediction with RASP
+>>>>>>>>>>>>>>>>>>>>>>Protein name: 5W9F, iteration: 8, repeat: 0, number of input restraint pair: 62, confidence: 75.21, input restraint recall: 1.0,
+Violation of structure after relaxation: 0.0
+
+>>>>>>>>>>>>>>>>>>>>>>Protein name: 5W9F, iteration: 9, repeat: 1, number of input restraint pair: 56, confidence: 65.50, input restraint recall: 1.0,
+Violation of structure after relaxation: 0.0
+
+......
+
+# Assignment
+1st calibration and calculation of new distance-bounds done (calibration factor: 6.546974e+06)
+Time: 0.019391536712646484s
+Violation analysis done: 664 / 4447 restraints (14.9 %) violated.
+Time: 14.645306587219238s
+Final calibration and calculation of new distance-bounds done (calibration factor: 5.004552e+06).
+Time: 0.015628814697265625s
+Partial assignment done.
+Time: 15.671599626541138s
+
+......
+
+# Evaluation of assignment
+Iteration 1:
+protein name: 2L33
+restraints number per residue: 31.48
+long restraints number per residue: 7.67
+restraints-structure coincidence rate: 0.977
+long restraints structure coincidence rate: 0.9642
+
+......
+```
+
+- Protein name is the name of the protein.
+- Number of input restraint pairs is the number of effective restraint pairs given as input.
+- Confidence is the credibility of the predicted structure, where 0 means not trustworthy at all and 100 means very trustworthy. The confidence is positively correlated with structure quality (correlation coefficient > 0.65).
+- Input restraint recall is the fraction of the input restraints that are satisfied by the predicted structure.
+- Long restraints are restraints between residue pairs whose separation in the primary sequence is greater than or equal to 4.
+
+#### **Comparison of Results**
+
+
+
+
+
+The figure above compares the assignment time and accuracy of the FAAST pipeline with the traditional approach. Taking the Ascend 910 aarch64 system as an example, deployment takes about half an hour in an environment where the hardware driver package is already installed. FAAST takes about half an hour per sequence on average, which means that even raw NMR data can be analyzed from scratch in as little as one day, a process that would otherwise take several months or more.
+
+## Reference
+
+[1] Jumper J, Evans R, Pritzel A, et al. Applying and improving AlphaFold at CASP14[J]. Proteins: Structure, Function, and Bioinformatics, 2021.
+
+[2] Liu S, Zhang J, Chu H, et al. PSP: million-level protein sequence dataset for protein structure prediction[J]. arXiv preprint arXiv:2206.12240, 2022.
+
+[3] Terwilliger T C, Poon B K, Afonine P V, et al. Improved AlphaFold modeling with implicit experimental information[J]. Nat Methods, 2022.
+
+## Acknowledgement
+
+FAAST uses or references the following open source tools:
+
+- [ARIA]()
+- [ColabFold](https://github.com/sokrypton/ColabFold)
+- [AlphaFold2](https://github.com/deepmind/alphafold)
+- [Biopython](https://biopython.org)
+- [HH Suite](https://github.com/soedinglab/hh-suite)
+- [Kalign](https://msa.sbc.su.se/cgi-bin/msa.cgi)
+- [ML Collections](https://github.com/google/ml_collections)
+- [NumPy](https://numpy.org)
+- [OpenMM]()
+
+We thank all the contributors and maintainers of these open source tools!
diff --git a/MindSPONGE/applications/research/FAAST/README_CN.md b/MindSPONGE/applications/research/FAAST/README_CN.md
new file mode 100644
index 0000000000000000000000000000000000000000..f2f6b1f3312160850526bee0c4a5c0dd5c2e6ae1
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/README_CN.md
@@ -0,0 +1,357 @@
+[ENGLISH](README.md)|简体中文
+
+# FAAST and RASP
+
+已有的AI计算方法如MEGA-Fold/AlphaFold虽然极大地提高了预测静态蛋白质结构的准确性,但仍存在未解决的问题,例如生成动态构象和进行符合实验或先验信息的结构预测。为了解决这些问题我们在已有MEGA-Fold的基础上自研了RASP(Restraints Assisted Structure Predictor)模型,RASP模型能接受抽象或实验约束,使它能根据抽象或实验、稀疏或密集的约束生成结构。这使得RASP可用于多种应用,包括改进多结构域蛋白和msa较少的蛋白的结构预测。
+
+核磁共振方法(NMR)是唯一一种以原子分辨率解析更贴近蛋白质在实际环境下的溶液态构象与动态结构的方法[1][2],然而NMR实验数据获取与分析耗时长,平均单条蛋白需领域专家投入至少数月,其中大部分时间用于实验数据的解析和归属。现有NMR NOE谱峰数据解析方法如CARA,ARIA、CYANA等使用传统分子动力学模拟生成的结构迭代解析数据,解析速度慢,且从数据中解析出的约束信息和结构仍然需要大量专家知识,同时需要投入较长时间做进一步修正。为了提高 NMR 实验数据解析的速度和准确性,我们基于MindSpore+昇腾AI软硬件平台开发了NMR数据自动解析方法FAAST(iterative Folding Assisted peak ASsignmenT)。
+
+方便用户快速上手,我们在 Google 的 Colab 布置了简单的测试用例:[FAAST_DEMO](https://colab.research.google.com/drive/1uaki0Ui1Y_gqVW7KSo838aOhXHSM3PTe?usp=sharing)。测试版本支持有限(序列长度,推理速度),完整功能请尝试MindSpore+Ascend平台。
+
+更多信息请参考论文 ["Assisting and Accelerating NMR Assignment with Restrained Structure Prediction"](https://www.biorxiv.org/content/10.1101/2023.04.14.536890v1)。
+
+引用我们
+
+```bibtex
+@article{Liu2023AssistingAA,
+title={Assisting and Accelerating NMR Assignment with Restrained Structure Prediction},
+author={Sirui Liu and Haotian Chu and Yuantao Xie and Fangming Wu and Ningxi Ni and Chenghao Wang and Fangjing Mu and Jiachen Wei and Jun Zhang and Mengyun Chen and Junbin Li and F. Yu and Hui Fu and Shenlin Wang and Changlin Tian and Zidong Wang and Yi Qin Gao},
+journal={bioRxiv},
+year={2023}
+}
+```
+
+
+
+目录
+
+
+
+- [FAAST and RASP](#faast-and-rasp)
+ - [环境配置](#%E7%8E%AF%E5%A2%83%E9%85%8D%E7%BD%AE)
+ - [硬件环境与框架](#%E7%A1%AC%E4%BB%B6%E7%8E%AF%E5%A2%83%E4%B8%8E%E6%A1%86%E6%9E%B6)
+ - [安装依赖](#%E5%AE%89%E8%A3%85%E4%BE%9D%E8%B5%96)
+ - [代码目录](#%E4%BB%A3%E7%A0%81%E7%9B%AE%E5%BD%95)
+ - [运行示例](#%E8%BF%90%E8%A1%8C%E7%A4%BA%E4%BE%8B)
+ - [约束信息结构预测模型运行示例](#%E7%BA%A6%E6%9D%9F%E4%BF%A1%E6%81%AF%E7%BB%93%E6%9E%84%E9%A2%84%E6%B5%8B%E6%A8%A1%E5%9E%8B%E8%BF%90%E8%A1%8C%E7%A4%BA%E4%BE%8B)
+ - [FAAST-NMR数据自动解析方法运行示例](#faast-nmr%E6%95%B0%E6%8D%AE%E8%87%AA%E5%8A%A8%E8%A7%A3%E6%9E%90%E6%96%B9%E6%B3%95%E8%BF%90%E8%A1%8C%E7%A4%BA%E4%BE%8B)
+ - [**运行命令**](#%E8%BF%90%E8%A1%8C%E5%91%BD%E4%BB%A4)
+ - [**日志示例**](#%E6%97%A5%E5%BF%97%E7%A4%BA%E4%BE%8B)
+ - [**结果对比**](#%E7%BB%93%E6%9E%9C%E5%AF%B9%E6%AF%94)
+ - [引用](#%E5%BC%95%E7%94%A8)
+ - [致谢](#%E8%87%B4%E8%B0%A2)
+
+
+
+
+
+## 环境配置
+
+### 硬件环境与框架
+
+本工具基于[MindSPONGE](https://gitee.com/mindspore/mindscience/tree/master/MindSPONGE)生物计算库与[MindSpore](https://www.mindspore.cn/)AI框架开发,MindSpore 2.0及以后的版本均可运行,MindSpore安装和配置可以参考[MindSpore安装页面](https://www.mindspore.cn/install)。本工具可以在Ascend910或16G以上内存的GPU上运行,基于Ascend运行时默认调用混合精度,基于GPU运行时使用全精度计算。
+
+### 安装依赖
+
+- 安装MindSpore:
+ 下载mindspore wheel包:
+
+ 平台 | 链接
+ ----------|----------
+ Ascend-910平台 ARM操作系统 |
+ Ascend-910平台 x86操作系统 |
+ GPU平台 x86操作系统 |
+
+ 该版本mindspore对应昇腾硬件驱动包版本为:Ascend Data Center Solution 23.0.RC1,详细安装链接参考:<
+ 对应的英伟达cuda版本为11.1-11.8,安装链接可以参考:cuda安装链接()
+
+ 安装 wheel 包
+
+ ``` shell
+ pip install mindspore*.whl
+ ```
+
+- 安装MindSPONGE:
+ 下载 Mindscience仓,并编译 MindSPONGE包:
+
+ ``` shell
+ git clone https://gitee.com/mindspore/mindscience.git
+ cd ./mindscience/MindSPONGE/
+ ```
+
+ 若在Ascend910平台
+
+ ``` shell
+ bash build.sh -e ascend -j 8
+ ```
+
+ 若在GPU平台
+
+ ``` shell
+ bash build.sh -e gpu -j 8
+ ```
+
+ 安装 wheel 包
+
+ ``` shell
+ pip install ./output/mindsponge*.whl
+ ```
+
+- 安装其它依赖包:
+ 本工具依赖hhsearch 与 kalign 等搜索工具,可通过一键安装脚本自动配置(注意该脚本需要在FAAST目录下运行)
+
+ ``` shell
+ cd ./mindscience/MindSPONGE/applications/research/FAAST
+ sh ./install.sh
+ ```
+
+## 代码目录
+
+代码目录
+
+```bash
+├── FAAST
+ ├── main.py // FAAST主脚本
+ ├── run_rasp.py // RASP主脚本
+ ├── README.md // FAAST相关英文说明
+ ├── README_CN.md // FAAST相关中文说明
+ ├── extract_restraints.py // 从pdb提取约束样例文件
+ ├── search.py // ColabFold的mmseqs在线搜索
+ ├── install.sh // 安装相关依赖的shell脚本
+ ├── assign
+ ├── assign.py //迭代指认脚本
+ ├── init_assign.py //初始指认脚本
+ ├── commons
+ ├── analysis.py //结果分析工具
+ ├── nmr_hydrogen_equivariance.txt //氢原子简并性列表
+ ├── res_constants.py //氢原子简并性解析字典
+ ├── config
+ ├── data.yaml //数据处理参数配置
+ ├── model.yaml //模型参数配置
+ ├── data
+ ├── dataset.py // 异步数据读取脚本
+ ├── hhsearch.py // python封装的HHsearch工具
+ ├── kalign.py // python封装的Kalign工具
+ ├── msa_query.py // python封装的MSA处理工具
+ ├── parsers.py // mmcif文件读取脚本
+ ├── preprocess.py // 数据预处理脚本
+ ├── protein_feature.py // MSA与template特征搜索与整合脚本
+ ├── templates.py // 模板处理脚本
+ ├── utils.py // 数据处理所需功能函数
+ ├── model
+ ├── fold.py // RASP主模型脚本
+ ├── module
+ ├── evoformer.py // evoformer特征提取模块
+ ├── fold_wrapcell.py // 训练迭代封装模块
+ ├── head.py // FAAST附加输出模块
+ ├── structure.py // 3D结构生成模块
+ ├── template_embedding.py // 模板信息提取模块
+ ├── nmr_relax
+ ├── model
+ ├── structure_violation.py //计算结构是否存在严重违约
+ ├── utils.py //运行relax时的通用工具
+ ├── relax
+ ├── amber_minimize.py //运行openmm relax的主教本
+ ├── cleanup.py //清除相关进程的脚本
+ ├── relax.py //运行relax的主脚本
+ ├── utils.py //运行openmm relax的通用工具
+```
+
+
+
+## 运行示例
+
+### 约束信息结构预测模型运行示例
+
+下载RASP模型训练好的权重:[RASP.ckpt](),相关运行示例文件可以在[样例文件]()下载,运行以下命令启动推理。
+
+```bash
+用法:python run_rasp.py --run_platform PLATFORM --use_pkl False --restraints_path RESTRAINTS_PATH
+ --input_path INPUT_FILE_PATH --checkpoint_file CHECKPOINT_FILE --use_template True --use_custom False
+ --a3m_path A3M_PATH --template_path TEMPLATE_PATH
+
+选项:
+--restraints_path 约束信息文件夹位置,其中单个约束信息文件以txt形式保存
+--run_platform 运行平台,可选Ascend或GPU
+--input_path 输入文件夹目录,可包含多个.fasta/.pkl文件
+--checkpoint_file 模型权重文件路径
+--use_pkl 使用pkl数据作为输入,默认False
+--use_template 是否使用template信息, 默认True
+--use_custom 是否使用搜索好的msa信息与template信息, 默认False
+--a3m_path 搜索后保存的a3m文件夹位置,或者直接提供的a3m文件路径
+--template_path 搜索后保存的cif文件夹位置,或者直接提供的cif文件路径
+```
+
+RASP模型支持三种模式的输入:
+
+1. 输入原始fasta序列,通过在线mmseqs检索得到MSA和template,需要将use_pkl与use_custom设为False,同时输入a3m_path与template_path作为保存搜索结果的路径;
+2. 输入用户提供的MSA与template文件,其中MSA为a3m格式,template为cif格式,可以由用户自行检索或者由经验知识提供;需要将use_pkl设为False 与use_custom设为True,同时输入用户提供的MSA和template路径a3m_path 与 template_path;
+3. 输入提前预处理好得到的pkl文件,需要将use_pkl设为True,不需要额外输入a3m_path与template_path。
+
+ pkl文件的预处理可以参考`./data/protein_feature.py:monomer_feature_generate`函数,该函数主要处理输入序列的特征信息,搜索到的msa信息以及template信息。为了方便使用,每次运行完一次RASP模型会在 ./pkl_file/保存对应的pkl文件。也可以参考[样例pkl文件](https://download.mindspore.cn/mindscience/mindsponge/FAAST/example/pkl/2L33.pkl)。
+
+**约束信息**
+
+该模型额外需要约束(restraints)信息作为输入。约束信息是指形如`[[1,2],...,[2,10]]`的(N,2)残基对列表,代表空间上相互接近的氨基酸对;为了方便用户使用,这里输入的约束信息需要以.txt后缀形式输入。约束信息的来源多样,包括核磁共振波谱法、质谱交联等等,这里提供了一个从pdb提取约束信息的样例脚本,用法如下。
+
+```bash
+用法 python extract_restraints.py --pdb_path PDB_PATH --output_file OUTPUT_FILE
+选项:
+--pdb_path 提供约束信息的pdb文件
+--output_file 输出约束信息的文件位置
+```
+
+以下是约束信息样例文件,每一行即是一对氨基酸的空间位置信息,每个位置信息间用一个空格隔开。
+
+``` log
+51 74
+46 60
+36 44
+.. ..
+70 46
+18 68
+```
+
+推理结果保存在 `./result/`。
+
+```log
+{confidence of predicted structrue :89.23, time :95.86,restraint recall :1.0}
+```
+
+
+
+
+
+图A分别是原始PDB、AlphaFold、MEGA-Fold、RASP的结果,可以看出在多域蛋白6XMV上RASP模型推理得到的结果更接近真实结构。
+
+### FAAST-NMR数据自动解析方法运行示例
+
+#### **运行命令**
+
+下载RASP模型训练好的权重:[RASP.ckpt](),相关运行示例文件可以在[样例文件]()下载,运行以下命令启动推理。调整迭代配置可通过修改`assign_settings.py`中相关参数实现。
+
+```bash
+用法:python main.py --run_platform PLATFORM --use_pkl True --peak_and_cs_path PEAKLIST_PATH
+ --input_path INPUT_FILE_PATH --checkpoint_file CHECKPOINT_FILE --use_template True --use_custom False
+ --a3m_path A3M_PATH --template_path TEMPLATE_PATH
+
+选项:
+--peak_and_cs_path 化学位移表和NOESY谱峰列表所在路径
+--run_platform 运行平台,可选Ascend或GPU
+--input_path 输入文件目录,可包含多个.fasta/.pkl文件
+--checkpoint_file 模型权重文件路径
+--use_pkl 使用pkl数据作为输入,默认False
+--use_template 是否使用template信息, 默认True
+--use_custom 是否使用搜索好的msa信息与template信息, 默认False
+--a3m_path 搜索后保存的a3m文件夹位置,或者直接提供的a3m文件路径
+--template_path 搜索后保存的cif文件夹位置,或者直接提供的cif文件路径
+```
+
+该方法支持的输入形式与RASP模型类似,区别在于不需要约束信息,但需要化学位移表与NOESY谱峰数据,每条蛋白质序列的化学位移表与NOESY谱峰数据需存放在独立的文件夹中,文件组织形式请参考样例文件。
+
+**NOESY谱数据**:文件名必须是以`noelist_`开头的`.txt`文件,包含四列数据,以空格符分隔,其中第一列为重原子的共振频率,第二列为与重原子相连的氢原子的共振频率,第三列为另一个氢原子的共振频率,第四列为峰强度(volume),若存在多个NOESY谱,需分为多个`.txt`文件独立存储,当前仅支持3D-NOESY谱数据。文件示例如下:
+
+``` log
+w1 w2 w3 volume
+119.73 4.584 8.102 7689.0
+119.73 3.058 8.102 1084.0
+119.73 3.057 8.102 1084.0
+119.73 7.005 8.102 317.0
+120.405 8.102 7.857 945.0
+......
+```
+
+**化学位移表**:文件名以`chemical_shift_aligned.txt`命名,包含以空格符分隔的五列数据,按顺序分别为原子名称,原子类型,化学位移,原子所属残基编号,原子所属残基类型,其中原子所属残基编号必须与input_path中的序列对齐。文件示例如下:
+
+``` log
+atom_name atom_type chem_shift res_idx res_type
+HA H 4.584 10 HIS
+HB2 H 3.058 10 HIS
+HB3 H 3.057 10 HIS
+HD2 H 7.005 10 HIS
+CA C 56.144 10 HIS
+......
+```
+
+#### **日志示例**
+
+以下为运行日志示例,FAAST会运行多轮迭代(iteration),每次迭代会运行多次RASP模型(repeat)使用随机采样的部分约束信息计算蛋白质结构。第0次迭代仅重复一次,所得结构用于过滤初始指认所得的约束信息中较差的约束信息。第1次迭代开始每次迭代重复多次推理,结构用于NOESY峰指认(assignment),同时输出指认结构的评估(Evaluation of assignment),方法详情请参考论文方法部分。
+
+```log
+# Initial structure prediction without restraint
+>>>>>>>>>>>>>>>>>>>>>>Protein name: 5W9F, iteration: 0, repeat: 0, number of input restraint pair: 0, confidence: 84.58, input restraint recall: 1.0,
+Violation of structure after relaxation: 0.0
+
+# Initial assignment
+Initial assignment:
+C 2L33 noelist_17169_spectral_peak_list_2.txt 4644 4626
+N 2L33 noelist_17169_spectral_peak_list_1.txt 1366 1210
+Filtering restraint with given structure.
+
+......
+
+# Structure prediction with RASP
+>>>>>>>>>>>>>>>>>>>>>>Protein name: 5W9F, iteration: 8, repeat: 0, number of input restraint pair: 62, confidence: 75.21, input restraint recall: 1.0,
+Violation of structure after relaxation: 0.0
+
+>>>>>>>>>>>>>>>>>>>>>>Protein name: 5W9F, iteration: 9, repeat: 1, number of input restraint pair: 56, confidence: 65.50, input restraint recall: 1.0,
+Violation of structure after relaxation: 0.0
+
+......
+
+# Assignment
+1st calibration and calculation of new distance-bounds done (calibration factor: 6.546974e+06)
+Time: 0.019391536712646484s
+Violation analysis done: 664 / 4447 restraints (14.9 %) violated.
+Time: 14.645306587219238s
+Final calibration and calculation of new distance-bounds done (calibration factor: 5.004552e+06).
+Time: 0.015628814697265625s
+Partial assignment done.
+Time: 15.671599626541138s
+
+......
+
+# Evaluation of assignment
+Iteration 1:
+protein name: 2L33
+restraints number per residue: 31.48
+long restraints number per residue: 7.67
+restraints-structure coincidence rate: 0.977
+long restraints structure coincidence rate: 0.9642
+
+......
+```
+
+Protein name是该蛋白的名字,number of input restraint pair是有效的输入约束信息数量,confidence是所得结构的可信度,0为完全不可信,100为非常可信,可信度与结构质量正相关(相关系数>0.65),input restraint recall是指推理所得结构与输入约束信息的符合率。long restraints是指蛋白质一级序列中残基编号间隔大于等于4的残基对约束信息。
+
+#### **结果对比**
+
+
+
+
+
+上图是FAAST方法和传统方法的解析时间及精度的对比。以ARM+Ascend910平台为例,在硬件驱动包已经安装好的环境中,单条序列的NOESY峰指认平均耗时约半个小时,且解析出的约束数量与约束-结构符合率与人工解析持平。
+
+## 引用
+
+[1] Jumper J, Evans R, Pritzel A, et al. Applying and improving AlphaFold at CASP14[J]. Proteins: Structure, Function, and Bioinformatics, 2021.
+
+[2] Liu S, Zhang J, Chu H, et al. PSP: million-level protein sequence dataset for protein structure prediction[J]. arXiv preprint arXiv:2206.12240, 2022.
+
+[3] Terwilliger T C, Poon B K, Afonine P V, et al. Improved AlphaFold modeling with implicit experimental information[J]. Nat Methods, 2022.
+
+## 致谢
+
+FAAST使用或参考了以下开源工具:
+
+- [ARIA]()
+- [ColabFold](https://github.com/sokrypton/ColabFold)
+- [AlphaFold2](https://github.com/deepmind/alphafold)
+- [Biopython](https://biopython.org)
+- [HH Suite](https://github.com/soedinglab/hh-suite)
+- [Kalign](https://msa.sbc.su.se/cgi-bin/msa.cgi)
+- [ML Collections](https://github.com/google/ml_collections)
+- [NumPy](https://numpy.org)
+- [OpenMM]()
+
+我们感谢这些开源工具所有的贡献者和维护者!
diff --git a/MindSPONGE/applications/research/FAAST/assign/__init__.py b/MindSPONGE/applications/research/FAAST/assign/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..33f6859b138c8364e5dd6bffd56b369b0fcc7303
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/assign/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"init"
diff --git a/MindSPONGE/applications/research/FAAST/assign/assign.py b/MindSPONGE/applications/research/FAAST/assign/assign.py
new file mode 100644
index 0000000000000000000000000000000000000000..939ec26842ac86b317834a76815d00a27f92a4db
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/assign/assign.py
@@ -0,0 +1,860 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"assign"
+import os
+import stat
+import time
+import io
+import itertools
+import pickle
+from Bio.PDB import PDBParser
+import numpy as np
+
+from commons.res_constants import EQUI_VARIANCE
+from assign.init_assign import get_ur_list2
+
+YES = "yes"
+NO = "no"
+
+
+def make_ensembles(pdb_list):
+ '''make_ensembles'''
+ ensembles = []
+ for pdb_file in pdb_list:
+ with open(pdb_file, "r") as f:
+ pdb_str = f.read()
+ pdb_fh = io.StringIO(pdb_str)
+ parser = PDBParser()
+ structure = parser.get_structure('none', pdb_fh)
+ model = list(structure.get_models())[0] # todo: multi models
+ chain = list(model.get_chains())[0] # todo: multimer
+
+ ensemble = {}
+ start_id = -1
+ for _, res in enumerate(chain):
+ res_id = int(res.id[1])
+
+ if start_id == -1:
+ start_id = res_id
+ res_id = res_id - start_id + 1
+
+ ensemble[res_id] = {"aatype": res.resname}
+
+ for atom in res:
+ ensemble.get(res_id)[atom.name] = atom.coord
+ ensembles.append(ensemble)
+ return ensembles
+
+
+def calculatebounds(factor, peaks, settings, bound_corrected=None, ensemble=None):
+ """
+ calculate lower and upper bounds for every peak using
+ the calibration 'factor'. values are stored.
+ 'bound_corrected': list of restraints which are classified
+ as correct after bound-modification.
+ """
+
+ if settings.get("calibration").get('use_bounds') == YES:
+ va_settings = settings.get("violation_analysis")
+
+ new_lbound = va_settings.get('lower_bound_correction').get('value')
+ new_ubound = va_settings.get('upper_bound_correction').get('value')
+ new_d = (new_ubound - new_lbound) / 2.
+
+ # [r['upper_bound'] = new_ubound for r in peaks]#dict
+ # [r['lower_bond'] = new_lbound for r in peaks]
+ # [r['distance'] = new_d for r in peaks]
+
+ for r in peaks:
+ r['upper_bound'] = new_ubound
+ r['lower_bond'] = new_lbound
+ r['distance'] = new_d
+
+ return
+
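+    # NOE peak volume scales as d^-6, so the calibration factor enters the distances
+    # below as factor^(1/6) * volume^(-1/6)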
+ factor = np.power(factor, 1. / 6)
+
+ peak_sizes = get_refpeak_sizes(peaks, settings.get("calibration").get("volume_or_intensity"))
+
+ # Malliavin/Bardiaux rMat
+ cs = settings.get("calibration")
+ if cs.get('relaxation_matrix') == YES and ensemble is not None:
+
+ ispa_peak_sizes = np.array([p.getIspa() for p in peaks])
+ peak_theoric_vol = np.array([p.getTheoricVolume() for p in peaks])
+
+ ratio = ispa_peak_sizes / peak_theoric_vol
+ distances = factor * np.power(peak_sizes * ratio, -1. / 6)
+
+
+ else:
+
+ distances = factor * np.power(peak_sizes, -1. / 6)
+
+ ## TODO: hard-coded 0.125
+
+ if cs['error_estimator'] == 'intensity':
+ errors = 0.125 * np.power((factor * np.power(peak_sizes, -1. / 6)), 2.)
+
+ else:
+ errors = 0.125 * np.power(distances, 2.)
+
+ ## lower bounds are >= 0.
+
+ lower_bounds = np.clip(distances - errors, 0., 1.e10)
+ upper_bounds = distances + errors
+
+ for i, _ in enumerate(peaks):
+ peak = peaks[i]
+
+ peak['distance'] = distances[i]
+ peak['lower_bond'] = lower_bounds[i]
+ peak['upper_bound'] = upper_bounds[i]
+
+ ## Set new (fixed) bounds for bound-corrected restraints
+
+ if bound_corrected:
+ va_settings = settings.get("violation_analysis")
+
+ if va_settings.get('lower_bound_correction')['enabled'] == YES:
+ new_bound = va_settings.get('lower_bound_correction').get('value')
+ [r.setLowerBound(new_bound) for r in bound_corrected]
+
+ if va_settings.get('upper_bound_correction').get('enabled') == YES:
+ new_bound = va_settings.get('upper_bound_correction').get('value')
+ [r.setUpperBound(new_bound) for r in bound_corrected]
+
+
+def getdistances(atom1, atom2, ensemble=None, number_of_best_structures=7):
+ '''getdistances'''
+ res1 = atom1.get('res')
+ atype1 = atom1.get('name')
+ restype1 = atom1.get("restype")
+ res2 = atom2.get('res')
+ atype2 = atom2.get('name')
+ restype2 = atom2.get("restype")
+
+ if ensemble is None:
+ raise ValueError('No coordinates have been read in.')
+ ## calculate distances
+ d = [1000000.0] * len(ensemble)
+
+ if res1 in ensemble[0].keys() and res2 in ensemble[0].keys():
+ atype1_equi = set(list(ensemble[0][res1].keys())).intersection(
+ EQUI_VARIANCE.get(restype1).get(atype1).get("equivariance"))
+
+ atype2_equi = set(list(ensemble[0][res2].keys())).intersection(
+ EQUI_VARIANCE.get(restype2).get(atype2).get("equivariance"))
+
+ if atype1_equi and atype2_equi:
+ x_all = []
+ y_all = []
+ for atype1, atype2 in itertools.product(atype1_equi, atype2_equi):
+
+ x_perstruct = []
+ y_perstruct = []
+ for _, structure in enumerate(ensemble):
+ if atype1 in structure[res1].keys() and atype2 in structure[res2].keys():
+ x_perstruct.append(structure[res1][atype1])
+ y_perstruct.append(structure[res2][atype2])
+
+ x = np.stack(x_perstruct)
+ x_all.append(x)
+ y = np.stack(y_perstruct)
+ y_all.append(y)
+ x_all = np.stack(x_all)
+ y_all = np.stack(y_all)
+
+ d = np.sqrt(np.sum(np.power(x_all - y_all, 2), -1))
+
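+            # take, for each structure, the minimum distance over all equivalent (degenerate) atom pairs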
+ d = np.min(d, axis=0)
+
+ else:
+ print(res1, restype1, ensemble[0][res1].keys(), EQUI_VARIANCE.get(restype1).get(atype1).get("equivariance"))
+ print(res2, restype2, ensemble[0][res2].keys(), EQUI_VARIANCE.get(restype2).get(atype2).get("equivariance"))
+ print(atom1, atom2)
+
+ n = number_of_best_structures
+ if n != 'all':
+ d = d[:n]
+ return d
+
+
+def effective_distances(contribution, ensemble):
+ '''effective_distances'''
+ d = [getdistances(sp.get('Atom1'), sp.get('Atom2'), ensemble) for sp in contribution.get('spin_pairs')]
+
+ ## for each structure: calculate partial volume
+
+ volumes = np.sum(np.power(d, -6.), axis=0)
+
+ return np.power(volumes, -1. / 6)
+
+
+def average(x, n=None, exponent=1., axis=0): # y
+ """
+ Returns (n^{-1} sum_1^n x_i^exponent)^{1/exponent}.
+ if 'n' is not None, it is used instead of len(x)
+ sum is taken wrt to axis 'axis'
+ """
+ x = np.array(x, float)
+
+ if n is None:
+ n = np.shape(x)[axis]
+
+ return (np.sum(np.power(x, exponent), axis) / n) ** (1. / exponent)
+
+
+def analysepeak2(ensemble, peak, tol, va_settings, lower_correction=None,
+ upper_correction=None, sig_mode="fix"):
+ '''analysepeak2'''
+
+ ## for every structure: calculate effective contributon-distance
+ ## d_avg is a [n_c x n_s] dim. array
+ ## n_c: number of contributions
+ ## n_s: number of structures in ensemble
+
+ d_avg = [effective_distances(c, ensemble) \
+ for c in peak.get('analysis').get('contributions')]
+ d_avg = np.stack(d_avg)
+
+ ## Effective lower/upper bounds
+
+ if tol is None:
+ tol = va_settings.get('violation_tolerance')
+
+ if lower_correction is not None:
+ lower = lower_correction
+ else:
+ lower = peak.get('lower_bond')
+
+ if upper_correction is not None:
+ upper = upper_correction
+ else:
+ upper = peak.get('upper_bound')
+
+ dist = peak.get('distance')
+
+ if sig_mode == "fix":
+ violated_lower = np.less(d_avg, lower - tol)
+ violated_upper = np.greater(d_avg, upper + tol)
+ else:
+ violated_lower = np.less(d_avg, dist - tol)
+ violated_upper = np.greater(d_avg, dist + tol)
+
+ violated = np.logical_or(violated_lower, violated_upper)
+
+ violated = 1 - (violated.shape[0] > violated).sum(axis=0) > 0
+
+ r_viol = float(sum(violated)) / float(len(violated))
+
+ return r_viol
+
+
+def analysepeak(ensemble, peak, tol, va_settings,
+ lower_correction=None, upper_correction=None, sig_mode="fix"):
+ '''analysepeak'''
+
+ ## for every structure: calculate effective contributon-distance
+ ## d_avg is a [n_c x n_s] dim. array
+ ## n_c: number of contributions
+ ## n_s: number of structures in ensemble
+
+ d_avg = [effective_distances(c, ensemble) \
+ for c in peak.get('analysis').get('contributions')]
+
+ d_avg = np.power(np.sum(np.power(d_avg, -6), axis=0), -1. / 6)
+
+ ## Effective lower/upper bounds
+
+ if tol is None:
+ tol = va_settings.get('violation_tolerance')
+
+ if lower_correction is not None:
+ lower = lower_correction
+ else:
+ lower = peak.get('lower_bond')
+
+ if upper_correction is not None:
+ upper = upper_correction
+ else:
+ upper = peak.get('upper_bound')
+
+ dist = peak.get('distance')
+ ## calculate fraction of violated distances
+ ## 1: distance is violated, 0: distance lies within bounds
+
+ if sig_mode == "fix":
+ violated_lower = np.less(d_avg, lower - tol)
+ violated_upper = np.greater(d_avg, upper + tol)
+ else:
+ violated_lower = np.less(d_avg, dist - tol)
+ violated_upper = np.greater(d_avg, dist + tol)
+
+ violated = np.logical_or(violated_lower, violated_upper)
+
+ r_viol = float(sum(violated)) / float(len(violated))
+
+ return r_viol
+
+
+def tolerance(ensemble, peak):
+ '''tolerance'''
+
+ ## for every structure: calculate effective contributon-distance
+ ## d_avg is a [n_c x n_s] dim. array
+ ## n_c: number of contributions
+ ## n_s: number of structures in ensemble
+
+ d_eff = [effective_distances(c, ensemble) \
+ for c in peak.getContributions()]
+
+ ## Effective lower/upper bounds
+ dist = peak.getDistance()
+
+    d_eff = np.power(np.sum(np.power(d_eff, -6), axis=0), -1. / 6)
+ for i, _ in enumerate(d_eff):
+ d_eff[i] = abs(dist - d_eff[i])
+
+ return d_eff
+
+
+def doviolationanalysis(restraints, ensemble, va_settings):
+ """
+ 'restraints': list of AriaPeaks. The bounds of every
+ restraint in that list is checked against distances found
+ in the 'ensemble'.
+ 'targets': list of AriaPeaks. The violationAnalyser will
+ store all intermediate results in their analysis-section.
+ Note: we assume, that peaks[i] corresponds to results[i]
+ for all i !. If a restraint has been violated, the
+ corresponding 'target'_restraint will be marked as violated.
+ """
+
+ violated = []
+ non_violated = []
+
+ ## get threshold for current iteration
+
+ if va_settings.get('sigma_mode') == 'auto':
+ ecars = []
+ for restraint in restraints:
+ temp_ecars = tolerance(ensemble, restraint)
+ for ecar in temp_ecars:
+ ecars.append(ecar)
+
+ ecar_avg = sum(ecars) / len(ecars)
+ tol = 0
+ for n_ecar in ecars:
+ tol = tol + np.power(n_ecar - ecar_avg, 2)
+ print("TOLERANCE ", np.power(tol / len(ecars), 1. / 2))
+ tol = np.power(tol / len(ecars), 1. / 2) * va_settings['violation_tolerance']
+ print("AVG RESTRAINT ", tol)
+
+ else:
+ tol = None
+
+ for restraint in restraints:
+ r_viol = analysepeak(ensemble, restraint, tol, va_settings, sig_mode=va_settings['sigma_mode'])
+
+ ##
+ ## If a restraint has been violated in too many structures
+ ## (according to 'threshold'), mark is a violated.
+ ##
+ threshold = va_settings.get('violation_threshold')
+ if r_viol > threshold:
+ restraint.get('analysis')['is_violated'] = 1
+ violated.append(restraint)
+
+ else:
+ restraint.get('analysis')['is_violated'] = 0
+ non_violated.append(restraint)
+
+ ## For violated restraints: if bound-correction is enabled,
+ ## repeat violation-analysis with modified bounds.
+
+ if va_settings.get('lower_bound_correction').get('enabled') == YES:
+ new_lower = va_settings.get('lower_bound_correction').get('value')
+ else:
+ new_lower = None
+
+ if va_settings.get('upper_bound_correction').get('enabled') == YES:
+ new_upper = va_settings.get('upper_bound_correction').get('value')
+ else:
+ new_upper = None
+
+ if new_lower is not None or new_upper is not None:
+
+ ## We forget 'store_analysis' here, since it has already
+ ## been stored (if set).
+
+ r_viol = [analysepeak(ensemble, r, tol, va_settings,
+ lower_correction=new_lower,
+ upper_correction=new_upper,
+ sig_mode=va_settings.get('sigma_mode')) for r in violated]
+
+ ## List of restraint-indices which are no longer
+ ## violated after bound modification.
+
+ indices = np.flatnonzero(np.less(r_viol, threshold))
+ new_non_violated = [violated[i] for i in indices]
+
+        for r in new_non_violated:
+            r.get('analysis')['is_violated'] = 0
+
+ else:
+ new_non_violated = None
+
+ return violated, non_violated, new_non_violated
+
+
+def get_refpeak_sizes(peaks, volume_or_intensity="volume"):
+ '''get_refpeak_sizes'''
+ ref_peaks = [p.get('ref_peak') for p in peaks]
+
+ if volume_or_intensity == 'volume':
+ peak_sizes = [p.get('volume')[0] for p in ref_peaks]
+ else:
+ peak_sizes = [p.get('intensity')[0] for p in ref_peaks]
+ return peak_sizes
+
+
+def calculatepeaksize(peak, ensemble):
+ '''calculatepeaksize'''
+
+ if not peak:
+ raise ValueError('No contributions in xpk: %d' %
+ peak.getId())
+
+ ## for each structure: calculate effective distance
+ ## for contribution, i.e. distances between atoms
+ ## of every spinpair are averaged according to the
+ ## type of the given contribution.
+
+ avg_distances = [effective_distances(c, ensemble) for c in peak.get('analysis').get('contributions')]
+
+ ## for each contribution: calculate ensemble-average
+ ## TODO: average -> _average, probably faster
+    avg_distances = np.average(avg_distances, axis=1)
+
+ ## calculate NOEs
+ d = np.power(avg_distances, -6.)
+
+ ## NOE is sum over partial NOEs
+
+ if np.sum(d) == np.inf:
+ print(d)
+
+ for c in peak.get('analysis').get('contributions'):
+ if np.sum(effective_distances(c, ensemble)) < 0.00001:
+ print(c)
+ print(effective_distances(c, ensemble))
+
+ print(peak)
+ raise ValueError
+ return np.sum(d)
+
+
+def dodumbocalibration(peaks, volume_or_intensity="volume"):
+ '''dodumbocalibration'''
+ peak_sizes = get_refpeak_sizes(peaks, volume_or_intensity)
+
+ ## Assume that average distance of atoms
+ ## causing an NOE is 3.0A
+
+ d_calib = 3.0
+
+ sum_noe_calc = len(peaks) * (d_calib ** -6)
+
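+    ## the factor maps calculated NOEs (d**-6 terms, with d = 3.0 A assumed)
+    ## onto the scale of the experimental peak sizes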
+    factor = np.sum(peak_sizes, dtype=np.float64) / sum_noe_calc
+
+ return factor
+
+
+def calculateestimator(peaks, ensemble, calibration_settings, use_cutoff=1):
+ '''calculateestimator'''
+ if not peaks:
+        raise ValueError('No peaks specified.')
+
+ if calibration_settings['volume_or_intensity'] == 'volume':
+ exp_peak_sizes = [p['ref_peak']['volume'][0] \
+ for p in peaks] # dict
+ else:
+ exp_peak_sizes = [p['ref_peak']['intensity'][0] \
+ for p in peaks] # dict
+
+ model_peak_sizes = np.array([calculatepeaksize(p, ensemble) for p in peaks])
+
+    ## consider only NOEs that are larger than noe_cutoff.
+
+ if use_cutoff:
+ noe_cutoff = calibration_settings['distance_cutoff'] ** (-6.)
+ else:
+ noe_cutoff = 0.
+
+ strong_noes = np.greater_equal(model_peak_sizes, noe_cutoff)
+
+ sum_noe_model = np.sum(np.compress(strong_noes,
+ model_peak_sizes))
+ sum_noe_exp = np.sum(np.compress(strong_noes,
+ exp_peak_sizes), dtype=np.float64)
+
+    ## if there are no NOEs larger than noe_cutoff, calibration is not possible.
+
+ if sum_noe_model <= 1.e-30:
+ return None
+
+ ## calculate estimator
+    if calibration_settings['estimator'] == 'ratio_of_averages':
+        factor = sum_noe_exp / sum_noe_model
+    else:
+        raise ValueError('Unknown estimator: %s' % calibration_settings['estimator'])
+
+ ## store calculated peak-size
+
+ return factor
+
+
+def docalibration(restraints, ensemble, all_settings):
+ '''docalibration'''
+ # BARDIAUX 2.2
+ # ConstraintList can bypass calibration
+
+ calibration_settings = all_settings["calibration"]
+ if calibration_settings['use_bounds'] == YES:
+ print('calibration disabled')
+ return 1.
+
+ if ensemble is None:
+ factor = dodumbocalibration(restraints)
+ else:
+ factor = calculateestimator(restraints, ensemble, calibration_settings,
+ use_cutoff=1.0)
+
+ if factor is None:
+ d_cutoff = calibration_settings['distance_cutoff']
+
+ s = 'Could not perform 1st calibration, since ' + \
+ 'no distances less than %.1f A were found in the ' + \
+ 'ensemble. Omitting distance-cutoff and ' + \
+ 'calibrating again...'
+
+ print(s % d_cutoff)
+
+ factor = calculateestimator(restraints, ensemble, calibration_settings,
+ use_cutoff=0.0)
+
+ return factor
+
+
+def filter_weights(weights, cutoff):
+ """
+    Sort the weights in descending order and split their indices into
+    the largest weights whose cumulative sum reaches 'cutoff' and the
+    remaining small weights.
+    Returns (large_weight_indices, small_weight_indices).
+ """
+
+ ## sort weights in descending order
+ indices = np.argsort(weights)
+ indices = np.take(indices, np.arange(len(indices) - 1, -1, -1))
+ s_weights = np.take(weights, indices)
+
+ x = np.add.accumulate(s_weights)
+
+ try:
+ index = np.flatnonzero(np.greater(x, cutoff))[1]
+ except Exception as _:
+ index = len(indices)
+
+ ## we limit the number of contributing
+ ## weights to max_n.
+
+ ## BARDIAUX
+ # test maxn remove peak
+
+ ## Return set of large and small weights.
+
+ return indices[:index], indices[index:]
+
+
+def assign(restraint_list, ensemble, all_settings, filter_contributions=1):
+ '''assign'''
+
+ def average1(x):
+ return np.sum(np.array(x), axis=0) / len(x)
+
+ def variance(x, avg=None):
+ if avg is None:
+ avg = average1(x)
+
+ return np.sum(np.power(np.array(x) - avg, 2), axis=0) / (len(x) - 1.)
+
+ def standarddeviation(x, avg=None):
+ return np.sqrt(variance(x, avg))
+
+ all_contributions = []
+ weights = []
+
+ for restraint in restraint_list:
+
+ distances = []
+
+ contributions = restraint['analysis']['contributions']
+ all_contributions.append(contributions)
+
+ for contribution in contributions:
+
+ ## for every structure: get effective distance
+ ## for 'contribution'
+
+ d = effective_distances(contribution, ensemble)
+
+ d_avg = np.average(d)
+ distances.append(d_avg)
+
+ if len(d) > 1:
+ sd = standarddeviation(d, avg=d_avg)
+ else:
+ sd = None
+
+ contribution['average_distance'] = [d_avg, sd]
+
+            ## calculate partial NOE with respect to the ensemble-averaged
+ ## distance. The partial NOE serves as weight which
+ ## subsequently will be normalized to 1.
+
+ w = np.power(distances, -6.)
+
+ ## normalize weights and store weights
+
+ w /= np.sum(w)
+
+ weights.append(w)
+
+ settings = all_settings["assign"]
+ cutoff = settings['weight_cutoff']
+
+ # if cutoff is not None:
+ if cutoff is not None and filter_contributions:
+
+ ## 1. disable all contributions according to
+ ## the partial-assignment scheme.
+ ## 2. allow at most 'max_contributions' contributions
+
+ for i, _ in enumerate(weights):
+
+ w = weights[i]
+ c = all_contributions[i]
+
+ on, off = filter_weights(w, cutoff)
+
+ for index in off:
+ c[index]['weight'] = 0.
+ for index in on:
+ c[index]['weight'] = w[index]
+ else:
+
+        ## if cutoff is not set, enable all contributions.
+
+ ## note: setting 'max_contributions' does not
+ ## apply in that case since we have no rule
+ ## how to select contributions which remain
+ ## active.
+
+ for i, _ in enumerate(weights):
+
+ contributions = all_contributions[i]
+ w = weights[i]
+
+ for j, _ in enumerate(w):
+ contributions[j]["weight"] = w[j]
+
+
+def run_iteration(peaks, ensemble, all_settings):
+ '''run_iteration'''
+ if ensemble is None:
+ ## BARDIAUX
+ # test maxn remove peak
+ maxn = all_settings["assign"]['max_contributions']
+ for p in peaks:
+            ## collect the contributions that are still active (weight > 0)
+            activate = [c for c in p['analysis']['contributions']
+                        if c is not None and isinstance(c.get('weight'), float) and c['weight'] > 0.]
+            if len(activate) > maxn and not p['ref_peak']['reliable']:
+ p['active'] = 0
+
+ return peaks
+
+ else:
+
+        ## Calculate initial calibration factor.
+
+ factor = docalibration(peaks, ensemble, all_settings)
+
+ ## Calculate upper/lower bounds for restraints of current
+ ## iteration.
+
+ t = time.time()
+
+ calculatebounds(factor, peaks, all_settings, ensemble=ensemble)
+
+ s = '1st calibration and calculation of new ' + \
+ 'distance-bounds done (calibration factor: %e)'
+ print(s % factor)
+ print('Time: %ss' % str(time.time() - t))
+
+ ##
+ ## Violation Analysis
+ ##
+ ## Assess every restraint regarding its degree of violation.
+ ##
+ ## Violated restraints will be disabled for the
+ ## current iteration and thus will not be used during
+ ## structure calculation.
+ ##
+
+ t = time.time()
+
+ violated, non_violated, new_non_violated = doviolationanalysis(peaks, ensemble, \
+ all_settings['violation_analysis'])
+
+ ## Augment set of non-violated restraints
+
+ if new_non_violated:
+ non_violated += new_non_violated
+
+ n = len(peaks)
+ n_viol = len(violated)
+ p_viol = n_viol * 100. / n
+
+ s = 'Violation analysis done: %d / %d restraints ' + \
+ '(%.1f %%) violated.'
+
+ print(s % (n_viol, n, p_viol))
+
+ if new_non_violated:
+ s = 'Number of valid restraints has been increased ' + \
+ 'by %d (%.1f%%) after applying a bound-correction.'
+
+ p_new = len(new_non_violated) * 100. / n
+
+ print(s % (len(new_non_violated), p_new))
+
+ print('Time: %ss' % str(time.time() - t))
+
+ ##
+        ## 2nd calibration - with respect to the non-violated restraints.
+ ## If no restraints have been violated, we use the
+ ## 1st calibration factor.
+ ## Again, we do not store results.
+ ##
+
+ if non_violated:
+ factor = docalibration(non_violated, ensemble, all_settings)
+
+ ##
+ ## Activate restraints explicitly.
+ ## We consider a restraint as active, if it has
+ ## not been violated or if its reference cross-peak is
+ ## 'reliable'.
+ ##
+
+ for r in peaks:
+ if not r['analysis']['is_violated'] or r['ref_peak']['reliable']:
+ r['active'] = 1 # ?
+ else:
+ r['active'] = 0
+
+ ## Store final calibration factor for current iteration.
+
+ ## Calculate upper/lower bounds for restraint-list
+ ## used in the current iteration. I.e. these bounds
+        ## will be used to calculate the structures.
+
+ t = time.time()
+
+ calculatebounds(factor, peaks, all_settings, new_non_violated, ensemble=ensemble)
+
+ s = 'Final calibration and calculation of new distance-bounds' + \
+ ' done (calibration factor: %e).' % factor
+ print(s)
+ print('Time: %ss' % str(time.time() - t))
+ ##
+ ## Partial assignment for restraint-list used in
+ ## current iteration, i.e. for all restraints:
+ ## Calculate weight for every contribution
+        ## and (depending on the partial-assignment
+        ## settings) throw away 'unlikely' contributions.
+ ##
+ ## If we do not have an ensemble, all contributions
+ ## are activated.
+ ##
+
+ t = time.time()
+
+ assign(peaks, ensemble, all_settings, filter_contributions=True)
+ print('Partial assignment done.')
+ print('Time: %ss' % str(time.time() - t))
+ return None
+
+
+def assign_iteration(ur_tuple_path, ur_path, pdb_path, peak_list_path, all_settings, filter_names=None):
+ '''assign_iteration'''
+ for path in [ur_tuple_path, ur_path]:
+ os.makedirs(path, exist_ok=True)
+
+ names = os.listdir(pdb_path)
+ names = [name for name in names if ".pdb" in name]
+ pdb_list = [os.path.join(pdb_path, name) for name in names]
+
+ prot_names = sorted(os.listdir(peak_list_path))
+
+ if filter_names:
+ prot_names = list(set(prot_names).intersection(set(list(filter_names))))
+
+ for prot_name in prot_names:
+ if prot_name == "2K0M":
+ continue # bad peak list
+ prot_path = os.path.join(peak_list_path, prot_name)
+ file_list = os.listdir(prot_path)
+
+ cur_pdb_list = [name for name in pdb_list if prot_name in name]
+
+ if not cur_pdb_list:
+ continue
+ ensembles = make_ensembles(cur_pdb_list)
+
+ peak_file_list = []
+ for file in file_list:
+ if file.startswith("new_spectrum"):
+ peak_file_list.append(file)
+
+ all_peak_list = []
+ for peak_file in peak_file_list:
+ full_peak_filename = os.path.join(prot_path, peak_file)
+ with open(full_peak_filename, "rb") as f:
+ peak_list = pickle.load(f)
+
+ print("\n\n", full_peak_filename, len(peak_list))
+ if not peak_list:
+ continue
+ run_iteration(peak_list, ensembles, all_settings)
+
+ all_peak_list.extend(peak_list)
+
+ ur_list, ur_list_tuple = get_ur_list2(all_peak_list, long_distance_threshold=0)
+ os_flags = os.O_RDWR | os.O_CREAT
+ os_modes = stat.S_IRWXU
+ with os.fdopen(os.open(ur_path + "/" + prot_name + ".pkl", os_flags, os_modes), "wb") as f:
+ pickle.dump(ur_list, f)
+ with os.fdopen(os.open(ur_tuple_path + "/" + prot_name + ".pkl", os_flags, os_modes), "wb") as f:
+ pickle.dump(ur_list_tuple, f)
diff --git a/MindSPONGE/applications/research/FAAST/assign/init_assign.py b/MindSPONGE/applications/research/FAAST/assign/init_assign.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0ea08fb53423c1e7dc980927d888e9f1ee91500
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/assign/init_assign.py
@@ -0,0 +1,411 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"init_assign"
+import copy
+import os
+import stat
+import pickle
+import numpy as np
+
+from commons.res_constants import EQ_GROUPS, EQUI_VARIANCE, AA_3TO1, \
+ atom_template, peak_template
+
+
+def get_ur_list(peak_list, long_distance_threshold=0):
+ '''get_ur_list'''
+ new_peaks = copy.deepcopy(peak_list)
+ ur_list_tuple = []
+ ur_list = []
+ for peak in new_peaks:
+ if peak["active"] < 0.5:
+ continue
+ ori_contributions = peak.get("analysis").get("contributions")
+ new_contributions = []
+ for contribution in ori_contributions:
+ if contribution["weight"] > 0:
+ new_contributions.append(contribution)
+ peak.get("analysis")["contributions"] = new_contributions
+ if len(new_contributions) == 1:
+ res_idx1 = new_contributions[0].get("spin_pairs")[0].get("Atom1").get("res")
+ res_idx2 = new_contributions[0].get("spin_pairs")[0].get("Atom2").get("res")
+ atype1 = new_contributions[0].get("spin_pairs")[0].get("Atom1").get("name")
+ atype2 = new_contributions[0].get("spin_pairs")[0].get("Atom2").get("name")
+ if abs(res_idx1 - res_idx2) < long_distance_threshold:
+ continue
+            ur_list.append([[res_idx1, atype1], [[res_idx2, atype2]]])
+            if (res_idx1, res_idx2) not in ur_list_tuple and (res_idx2, res_idx1) not in ur_list_tuple:
+                ur_list_tuple.append((res_idx1, res_idx2))
+ return ur_list, ur_list_tuple
+
+
+def ppm_shift(chem_shift, lb=-999.0,
+ ub=999.0):
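+    '''wrap chem_shift into the spectral window [lb, ub), presumably to undo spectral folding'''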
+ return chem_shift - np.floor((chem_shift - lb) / (ub - lb)) * (ub - lb)
+
+
+def group_by_peak_num(peak_adrs):
+ '''group_by_peak_num'''
+ peak_adrs_grouped = {}
+ for peak in peak_adrs:
+ peak2 = copy.deepcopy(peak)
+ peak2[2] = list(peak2[2])[0]
+ peak2[4] = list(peak2[4])[0]
+ peak2[7] = list(peak2[7])[0]
+        # normalise every entry to a plain string so the grouped tuples are uniform
+        peak2 = [str(value) for value in peak2]
+ peak_num = int(peak2[0])
+ if peak_num not in peak_adrs_grouped:
+ peak_adrs_grouped[peak_num] = [tuple(peak2[1:])]
+ else:
+ peak_adrs_grouped.get(peak_num).append(tuple(peak2[1:]))
+
+ return peak_adrs_grouped
+
+
+def init_assign(cn_noe, noe_atype, atom_types, chem_shifts, atom_names, res_types, res_idxs, windows=None):
+ '''init_assign'''
+ if noe_atype not in ["C", "N"]:
+ raise ValueError("Only C or N are supported as noe_atype")
+
+ if not windows:
+ window_cn = [-999.0, 999.0]
+ window_h1 = [-999.0, 999.0]
+ window_h2 = [-999.0, 999.0]
+ else:
+ window_cn = windows[0]
+ window_h1 = windows[1]
+ window_h2 = windows[2]
+
+ spect_window = window_h2[1] - window_h2[0]
+ cn_shift_idxs = np.where([atom_type == noe_atype for atom_type in atom_types])[0]
+ peak_adrs = []
+ pair_num = []
+
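+    # For every C/N chemical shift: look up its attached protons via the
+    # equivalence groups, match the (C/N, H1) pair against the NOE peak list
+    # (0.2 ppm tolerance on the heavy-atom dimension, 0.02 ppm on H1), and
+    # collect every H atom whose shift matches the second proton dimension
+    # within 0.02 ppm, allowing folding by one spectral width.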
+ for i, _ in enumerate(cn_shift_idxs):
+ cn_shift_ori = chem_shifts[cn_shift_idxs[i]]
+ cn_shift = ppm_shift(cn_shift_ori, window_cn[0], window_cn[1])
+ cn_name = atom_names[cn_shift_idxs[i]]
+ cn_eq_idx = np.where(
+ (EQ_GROUPS[:, 0] == AA_3TO1[res_types[cn_shift_idxs[i]]]) * \
+ ([cn_name in eqg for eqg in EQ_GROUPS[:, 2]]))[0]
+        if cn_eq_idx.size == 0:
+ continue
+ cn_res_idx = res_idxs[cn_shift_idxs[i]]
+ h1_eq_group = EQ_GROUPS[cn_eq_idx[0]][3]
+ h1_idx = np.where((res_idxs == cn_res_idx) * ([atom_name in h1_eq_group for atom_name in atom_names]))[
+ 0]
+ h1_shift = ppm_shift(chem_shifts[h1_idx], window_h1[0], window_h1[1])
+ if h1_shift.shape[0] == 0:
+ continue
+ cn_h_pair_idx = np.where((np.abs(cn_noe[:, 0] - cn_shift) < 0.2) * ( \
+ np.min(np.abs(cn_noe[:, 2][None] - h1_shift[:, None]), axis=0) < 0.02))[0]
+ pair_num.append(len(cn_h_pair_idx))
+ for k, _ in enumerate(cn_h_pair_idx):
+ h2_shift = np.array([cn_noe[cn_h_pair_idx[k], 1], cn_noe[cn_h_pair_idx[k], 1] + spect_window,
+ cn_noe[cn_h_pair_idx[k], 1] - spect_window])
+ if h2_shift.shape[0] == 0:
+ continue
+ h2_idxs = \
+ np.where((atom_types == 'H') * (np.min(np.abs(chem_shifts[None] - h2_shift[:, None]), axis=0) < 0.02))[
+ 0]
+ for l, _ in enumerate(h2_idxs):
+ h2_eq_group = EQ_GROUPS[(EQ_GROUPS[:, 0] == AA_3TO1[res_types[h2_idxs[l]]]) * (
+ [atom_names[h2_idxs[l]] in eqg for eqg in EQ_GROUPS[:, 3]])][0][3]
+ peak_adrs.append(
+ [cn_h_pair_idx[k], cn_shift_idxs[i], EQ_GROUPS[cn_eq_idx[0]][2], h1_idx[0], h1_eq_group,
+ res_types[h1_idx[0]], h2_idxs[l], h2_eq_group, res_types[h2_idxs[l]]])
+
+
+ peak_adrs_uniq = []
+ for peak_single in peak_adrs:
+ if peak_single not in peak_adrs_uniq:
+ peak_adrs_uniq.append(peak_single)
+
+ peak_adrs_uniq = np.array(peak_adrs_uniq)
+
+ peak_adrs_grouped = group_by_peak_num(peak_adrs_uniq)
+
+ return peak_adrs_grouped
+
+
+def make_atom(name, res, atom_id, hetero_name, types, restype):
+ '''make_atom'''
+ atom = copy.deepcopy(atom_template)
+ atom["name"] = name
+ atom["res"] = res
+ atom["atom_id"] = atom_id
+ atom["hetero_name"] = hetero_name
+ atom["type"] = types
+ atom["restype"] = restype
+ return atom
+
+
+def make_peak_list(peak_adrs, noe, res_idxs, noe_list_percentile=None):
+ '''make_peak_list'''
+ peak_list = []
+
+ spin_pair_id = 0
+
+ num_contributions = []
+ for peak_num, assignments in peak_adrs.items():
+
+ peak_chem_shifts = noe[peak_num]
+
+ peak = copy.deepcopy(peak_template)
+
+ peak["peak_id"] = peak_num
+
+ proton1assignments = []
+ hetero1assignments = []
+ proton2assignments = []
+
+ existed_equi_atoms = []
+
+ for assignment in np.unique(np.array(assignments)[:, 1:5], axis=0):
+ # merge equivariance
+ res_idx = res_idxs[int(assignment[1])]
+ aname = assignment[2]
+ if [res_idx, aname] in existed_equi_atoms:
+ continue
+ cur_equivariance = EQUI_VARIANCE.get(assignment[3]).get(aname).get("equivariance")
+ existed_equi_atoms.extend([[res_idx, aname] for aname in cur_equivariance])
+ proton1 = make_atom(name=aname,
+ res=res_idx,
+ atom_id=int(assignment[1]),
+ hetero_name=assignment[0],
+ restype=assignment[3],
+ types="H")
+ proton1assignments.append({
+ 'type': 'automatic',
+ 'atoms': [proton1]
+ })
+
+ existed_equi_atoms = []
+ for assignment in np.unique(np.array(assignments)[:, 5:8], axis=0):
+ res_idx = res_idxs[int(assignment[0])]
+ aname = assignment[1]
+
+ if [res_idx, aname] in existed_equi_atoms:
+ continue
+ cur_equivariance = EQUI_VARIANCE.get(assignment[2]).get(aname).get("equivariance")
+ existed_equi_atoms.extend([[res_idx, aname] for aname in cur_equivariance])
+
+ proton2 = make_atom(name=aname,
+ res=res_idxs[int(assignment[0])],
+ atom_id=int(assignment[0]),
+ hetero_name="N",
+ restype=assignment[2],
+ types="H")
+ proton2assignments.append({
+ 'type': 'automatic',
+ 'atoms': [proton2]
+ })
+ for assignment in np.unique(np.array(assignments)[:, [0, 1, 3]], axis=0):
+ aname = assignment[1]
+ hetero1 = make_atom(name=aname,
+ res=res_idxs[int(assignment[0])],
+ atom_id=int(assignment[0]),
+ restype=assignment[2],
+ hetero_name=None,
+ types=aname)
+ hetero1assignments.append({
+ 'type': 'automatic',
+ 'atoms': [hetero1]
+ })
+
+ peak.get("ref_peak")["proton1assignments"] = proton1assignments
+ peak.get("ref_peak")["hetero1assignments"] = hetero1assignments
+ peak.get("ref_peak")["proton2assignments"] = proton2assignments
+ peak.get("ref_peak")['volume'] = [peak_chem_shifts[3], None]
+ peak.get("ref_peak")['intensity'] = [peak_chem_shifts[3], None]
+
+ if noe_list_percentile and peak_chem_shifts[3] > noe_list_percentile:
+ continue
+
+ peak.get("ref_peak")["proton1ppm"] = [peak_chem_shifts[2], None]
+ peak.get("ref_peak")["proton2ppm"] = [peak_chem_shifts[1], None]
+ peak.get("ref_peak")["hetero1ppm"] = [peak_chem_shifts[0], None]
+
+ contributions = []
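+        # one contribution per (proton1, proton2) candidate pair; pairs whose
+        # proton1 name falls into proton2's equivalence group are skipped,
+        # presumably to drop diagonal/self peaks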
+ for proton1 in proton1assignments:
+ for proton2 in proton2assignments:
+ if proton1["atoms"][0]["name"] in \
+ EQUI_VARIANCE.get(proton2.get("atoms")[0].get("restype")).get( \
+ proton2.get("atoms")[0].get("name")).get("equivariance"):
+ continue
+ contribution = {
+ 'figure_of_merit': None,
+ 'weight': 1.0,
+ 'average_distance': [None, None],
+ 'contribution_id': spin_pair_id,
+ 'type': 'fast_exchange',
+ 'spin_pairs': [{'id': spin_pair_id,
+ 'Atom1': proton1["atoms"][0],
+ 'Atom2': proton2["atoms"][0],
+ }],
+ }
+
+ contributions.append(contribution)
+ spin_pair_id += 1
+ num_contributions.append(len(contributions))
+ if not contributions:
+ continue
+ peak.get("analysis")["contributions"] = copy.deepcopy(contributions)
+
+ peak_list.append(peak)
+ return peak_list
+
+
+def get_ur_list2(peak_list, long_distance_threshold=0):
+ '''get_ur_list2'''
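+    # Collect unambiguous restraints: an active peak whose contributions have
+    # been filtered down to exactly one spin pair yields a single
+    # (residue, atom) -> (residue, atom) restraint.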
+ new_peaks = copy.deepcopy(peak_list)
+ ur_list_tuple = []
+ ur_list = []
+ for peak in new_peaks:
+ if peak.get("active") < 0.5:
+ continue
+ ori_contributions = peak.get("analysis").get("contributions")
+ new_contributions = []
+ for contribution in ori_contributions:
+ if contribution["weight"] > 0:
+ new_contributions.append(contribution)
+ peak.get("analysis")["contributions"] = new_contributions
+ if len(new_contributions) == 1:
+ res_idx1 = new_contributions[0].get("spin_pairs")[0].get("Atom1").get("res")
+ res_idx2 = new_contributions[0].get("spin_pairs")[0].get("Atom2").get("res")
+ atype1 = new_contributions[0].get("spin_pairs")[0].get("Atom1").get("name")
+ atype2 = new_contributions[0].get("spin_pairs")[0].get("Atom2").get("name")
+ if abs(res_idx1 - res_idx2) < long_distance_threshold:
+ continue
+            ur_list.append([[res_idx1, atype1], [[res_idx2, atype2]]])
+            if (res_idx1, res_idx2) not in ur_list_tuple and (res_idx2, res_idx1) not in ur_list_tuple:
+                ur_list_tuple.append((res_idx1, res_idx2))
+ return ur_list, ur_list_tuple
+
+
+def load_noelist_from_txt(noe_file):
+    '''load_noelist_from_txt'''
+ with open(noe_file, "r") as f:
+ data_txt_load = f.readlines()
+
+ noelist = []
+ for line in data_txt_load:
+ words = line.split()
+ try:
+ noelist.append([float(words[0]), float(words[1]), float(words[2]), float(words[3])])
+ except ValueError:
+ continue
+ noelist = np.array(noelist)
+
+ return noelist
+
+
+def load_cs_from_txt(cs_file):
+ '''load_cs_from_txt'''
+ with open(cs_file, "r") as f:
+ data_txt_load = f.readlines()
+
+ atom_names = []
+ atom_types = []
+ chem_shifts = []
+ res_idxs = []
+ res_types = []
+
+ for line in data_txt_load:
+ words = line.split()
+ try:
+ atom_name = words[0]
+ atom_type = words[1]
+ chem_shift = float(words[2])
+ res_idx = int(words[3])
+ res_type = words[4]
+ except ValueError:
+ continue
+ atom_names.append(atom_name)
+ atom_types.append(atom_type)
+ chem_shifts.append(chem_shift)
+ res_idxs.append(res_idx)
+ res_types.append(res_type)
+
+ res = [atom_names, atom_types, chem_shifts, res_idxs, res_types]
+ res = [np.array(array) for array in res]
+ return res
+
+
+def init_assign_call(prot_path):
+ '''init_assign_call'''
+ prot_name = prot_path.split("/")[-1]
+ file_list = os.listdir(prot_path)
+ noe_file_list = []
+ for file in file_list:
+ if file.split("/")[-1].startswith("noelist_"):
+ noe_file_list.append(file)
+
+ cs_file_path = os.path.join(prot_path, "chemical_shift_aligned.txt")
+ atom_names, atom_types, chem_shifts, res_idxs, res_types = load_cs_from_txt(cs_file_path)
+
+ all_peak_list = []
+ for file_id, noe_file in enumerate(noe_file_list):
+ noe_file_path = os.path.join(prot_path, noe_file)
+ noe_list = load_noelist_from_txt(noe_file_path)
+
+        noe_list_25percentile = np.percentile(noe_list[:, -1], 100)  # 100th percentile == max, so no peak is filtered out below
+
+ windows = None
+ peak_adrs_grouped_asc = init_assign(cn_noe=noe_list,
+ noe_atype="C",
+ atom_types=atom_types,
+ chem_shifts=chem_shifts,
+ atom_names=atom_names,
+ res_types=res_types,
+ res_idxs=res_idxs,
+ windows=windows
+ )
+
+ peak_adrs_grouped_asn = init_assign(cn_noe=noe_list,
+ noe_atype="N",
+ atom_types=atom_types,
+ chem_shifts=chem_shifts,
+ atom_names=atom_names,
+ res_types=res_types,
+ res_idxs=res_idxs,
+ windows=windows
+ )
+ if len(peak_adrs_grouped_asc) > len(peak_adrs_grouped_asn):
+ peak_adrs_grouped = peak_adrs_grouped_asc
+ noe_atype = "C"
+ else:
+ noe_atype = "N"
+ peak_adrs_grouped = peak_adrs_grouped_asn
+
+ spectrum = make_peak_list(peak_adrs=peak_adrs_grouped,
+ noe=noe_list,
+ res_idxs=res_idxs,
+ noe_list_percentile=noe_list_25percentile)
+
+ print(noe_atype, " " * 5, prot_name, noe_file, len(noe_list), len(peak_adrs_grouped))
+ os_flags = os.O_RDWR | os.O_CREAT
+ os_modes = stat.S_IRWXU
+ with os.fdopen(os.open(prot_path + f'/new_spectrum_{noe_atype}_{file_id}.pkl', os_flags, os_modes), "wb") as f:
+ pickle.dump(spectrum, f)
+ all_peak_list += spectrum
+
+ ur_list, ur_list_tuple = get_ur_list2(all_peak_list, long_distance_threshold=0)
+
+ return ur_list, ur_list_tuple
diff --git a/MindSPONGE/applications/research/FAAST/assign_settings.py b/MindSPONGE/applications/research/FAAST/assign_settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd2f0c51ae95929cfaa22b5f74f4cc2736a5ae14
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/assign_settings.py
@@ -0,0 +1,59 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"assign_settings"
+import copy
+
+settings = {}
+settings["calibration"] = {'volume_or_intensity': 'volume',
+ 'relaxation_matrix': 'no',
+ 'distance_cutoff': 6.0,
+ 'estimator': 'ratio_of_averages',
+ 'error_estimator': 'distance',
+ 'use_bounds': 'no'}
+settings["violation_analysis"] = {'violation_tolerance': 1.0,
+ 'lower_bound_correction': {
+ "enabled": "no",
+ "value": 0.0,
+ },
+ 'upper_bound_correction': {
+ "enabled": "no",
+ "value": 0.0,
+ },
+ 'violation_threshold': 0.5,
+ 'sigma_mode': 'fix'}
+settings["assign"] = {'max_contributions': 20,
+ 'weight_cutoff': 0.9}
+settings["infer_pdb"] = {'sample_ur_rate': 0.05,
+ "num_repeats": 20}
+
+assign_all_settings = {}
+
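+# `settings` is mutated in place and deep-copied once per iteration, so each
+# snapshot below is independent. Later iterations raise `sample_ur_rate` and
+# `num_repeats`, and iteration 2 loosens the contribution weight cutoff.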
+settings.get("infer_pdb")["sample_ur_rate"] = 0.0
+settings.get("infer_pdb")["num_repeats"] = 1
+settings.get("assign")["weight_cutoff"] = 0.9
+settings["init_assign"] = True
+assign_all_settings[0] = copy.deepcopy(settings)
+
+settings.get("infer_pdb")["sample_ur_rate"] = 0.10
+settings.get("infer_pdb")["num_repeats"] = 20
+settings.get("assign")["weight_cutoff"] = 0.9
+settings["init_assign"] = False
+assign_all_settings[1] = copy.deepcopy(settings)
+
+settings.get("infer_pdb")["sample_ur_rate"] = 0.20
+settings.get("infer_pdb")["num_repeats"] = 20
+settings.get("assign")["weight_cutoff"] = 0.8
+settings["init_assign"] = False
+assign_all_settings[2] = copy.deepcopy(settings)
diff --git a/MindSPONGE/applications/research/FAAST/commons/analysis.py b/MindSPONGE/applications/research/FAAST/commons/analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..26c617bbf33a267a7733391c3b7d3ab4650ff713
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/commons/analysis.py
@@ -0,0 +1,479 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"analysis"
+import os
+import logging
+import io
+from typing import Optional
+import pickle
+import dataclasses
+from Bio.PDB import PDBParser
+import numpy as np
+
+from commons.res_constants import EQUI_VARIANCE
+import mindsponge.common.residue_constants as residue_constants
+
+log = logging.getLogger()
+log.setLevel(logging.ERROR)
+
+ATOM_TYPES_WITH_H = [
+ 'N', 'CA', 'C', 'CB', 'O', 'CG', 'CG1', 'CG2', 'OG', 'OG1', 'SG', 'CD',
+ 'CD1', 'CD2', 'ND1', 'ND2', 'OD1', 'OD2', 'SD', 'CE', 'CE1', 'CE2', 'CE3',
+ 'NE', 'NE1', 'NE2', 'OE1', 'OE2', 'CH2', 'NH1', 'NH2', 'OH', 'CZ', 'CZ2',
+ 'CZ3', 'NZ', 'OXT', 'HNZ2', 'HD2*', 'HG12', 'HH1*', 'HH', 'HH11', 'HNZ1',
+ 'HNE', 'HD*', 'HB1', 'HG2', 'HG23', 'HD22', 'HN', 'HB', 'HZ3', 'HE21',
+ 'HZ2', 'HA1', 'HH2*', 'HE2*', 'HG2*', 'HB2', 'HG22', 'HB*', 'HN22', 'HG*',
+ 'HSG', 'HE*', 'HE22', 'HE2', 'HA*', 'HA3', 'HD2', 'HH22', 'HNE1', 'HOG',
+ 'HZ1', 'HD1', 'HD12', 'HH21', 'HH12', 'HB3', 'H', 'HG3', 'HA', 'HN21',
+ 'HA2', 'HNZ3', 'HOH', 'HG1*', 'HD**', 'HH2', 'HE', 'HG**', 'HG21', 'HND1',
+ 'HD3', 'HH**', 'HD13', 'HG1', 'HD23', 'HG11', 'HZ', 'HG13', 'HNE2', 'HG',
+ 'HE1', 'HD11', 'HD21', 'HZ*', 'HE3', 'HNZ*', 'HD1*'
+]
+
+ATOM_ORDER_WITH_H = {atom_type: i for i, atom_type in enumerate(ATOM_TYPES_WITH_H)}
+
+ATOM_TYPE_NUM_WITH_H = len(ATOM_TYPES_WITH_H)
+
+RESNAME_TO_IDX = residue_constants.resname_to_idx
+
+IDX_TO_RESNAME = {val: key for key, val in RESNAME_TO_IDX.items()}
+
+
+@dataclasses.dataclass(frozen=True)
+class Protein:
+ """Protein structure representation."""
+
+ # Cartesian coordinates of atoms in angstroms. The atom types correspond to
+ # residue_constants.atom_types, i.e. the first three are N, CA, CB.
+ atom_positions: np.ndarray # [num_res, num_atom_type, 3]
+
+ # Amino-acid type for each residue represented as an integer between 0 and
+ # 20, where 20 is 'X'.
+ aatype: np.ndarray # [num_res]
+
+ # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
+ # is present and 0.0 if not. This should be used for loss masking.
+ atom_mask: np.ndarray # [num_res, num_atom_type]
+
+ # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
+ residue_index: np.ndarray # [num_res]
+
+ # B-factors, or temperature factors, of each residue (in sq. angstroms units),
+ # representing the displacement of the residue from its ground truth mean
+ # value.
+ b_factors: np.ndarray # [num_res, num_atom_type]
+
+
+def from_pdb_string_with_h(pdb_str: str, chain_id: Optional[str] = None) -> Protein:
+ """
+ Takes a PDB string and constructs a Protein object.
+ WARNING: All non-standard residue types will be converted into UNK. All
+ non-standard atoms will be ignored.
+
+ Args:
+ pdb_str: The contents of the pdb file
+ chain_id: If None, then the pdb file must contain a single chain (which
+ will be parsed). If chain_id is specified (e.g. A), then only that chain
+ is parsed.
+
+ Returns:
+ A new `Protein` parsed from the pdb contents.
+ """
+ pdb_fh = io.StringIO(pdb_str)
+ parser = PDBParser()
+ structure = parser.get_structure('none', pdb_fh)
+ models = list(structure.get_models())
+ models = models[:1]
+ if len(models) != 1:
+ raise ValueError(
+ f'Only single model PDBs are supported. Found {len(models)} models.')
+ model = models[0]
+
+ if chain_id is not None:
+ chain = model[chain_id]
+ else:
+ chains = list(model.get_chains())
+ chains = chains[:1]
+ if len(chains) != 1:
+ raise ValueError(
+ 'Only single chain PDBs are supported when chain_id not specified. '
+ f'Found {len(chains)} chains.')
+        chain = chains[0]
+
+ atom_positions = []
+ aatype = []
+ atom_mask = []
+ residue_index = []
+ b_factors = []
+
+ atom_order_with_h = {atom_type: i for i, atom_type in enumerate(ATOM_TYPES_WITH_H)}
+ atom_type_num_with_h = len(ATOM_TYPES_WITH_H)
+ for res in chain:
+ if res.id[2] != ' ':
+ raise ValueError(
+ f'PDB contains an insertion code at chain {chain.id} and residue '
+ f'index {res.id[1]}. These are not supported.')
+ res_shortname = residue_constants.restype_3to1.get(res.resname, 'X')
+ restype_idx = residue_constants.restype_order.get(
+ res_shortname, residue_constants.restype_num)
+ pos = np.zeros((atom_type_num_with_h, 3))
+ mask = np.zeros((atom_type_num_with_h,))
+ res_b_factors = np.zeros((atom_type_num_with_h,))
+ for atom in res:
+ if atom.name not in ATOM_TYPES_WITH_H:
+ continue
+ pos[atom_order_with_h[atom.name]] = atom.coord
+ mask[atom_order_with_h[atom.name]] = 1.
+ res_b_factors[atom_order_with_h[atom.name]] = atom.bfactor
+ if np.sum(mask) < 0.5:
+ # If no known atom positions are reported for the residue then skip it.
+ continue
+ aatype.append(restype_idx)
+ atom_positions.append(pos)
+ atom_mask.append(mask)
+ residue_index.append(res.id[1])
+ b_factors.append(res_b_factors)
+
+ return Protein(
+ atom_positions=np.array(atom_positions),
+ atom_mask=np.array(atom_mask),
+ aatype=np.array(aatype),
+ residue_index=np.array(residue_index),
+ b_factors=np.array(b_factors))
+
+
+def check_restraints(pdb_path, restraints, distance_threshold=8, pdb_align=1, return_new_restraints=False):
+ '''check_restraints'''
+ pdb_file_path = os.path.join(pdb_path)
+ with open(pdb_file_path, 'r') as f:
+ prot_pdb = from_pdb_string_with_h(f.read())
+ aatype_all = prot_pdb.aatype
+ atom99_positions = prot_pdb.atom_positions.astype(np.float32)
+ all_atom_mask = prot_pdb.atom_mask.astype(np.float32)
+
+ error_count = 0
+ hdist_all = []
+ ur_tuple_new = []
+ for res1, candidates in restraints:
+ try:
+ i, atype1 = res1
+ if i >= aatype_all.shape[0]:
+ continue
+ aatype1 = IDX_TO_RESNAME[aatype_all[i - pdb_align]]
+ mask1 = all_atom_mask[i - pdb_align]
+ pos1 = atom99_positions[i - pdb_align]
+ if atype1 not in EQUI_VARIANCE.get(aatype1).keys():
+ error_count += 1
+ continue
+
+ h1_atom = EQUI_VARIANCE.get(aatype1).get(atype1).get("equivariance")
+
+ h_dist = 999.0
+ for res2 in candidates:
+ j, atype2 = res2
+ if j >= aatype_all.shape[0]:
+ continue
+ aatype2 = IDX_TO_RESNAME[aatype_all[j - pdb_align]]
+ pos2 = atom99_positions[j - pdb_align]
+ mask2 = all_atom_mask[j - pdb_align]
+ if atype2 not in EQUI_VARIANCE.get(aatype2).keys():
+ error_count += 1
+ continue
+
+ h2_atom = EQUI_VARIANCE.get(aatype2).get(atype2).get("equivariance")
+
+ h1_idx = np.array([ATOM_ORDER_WITH_H[h1s] for h1s in h1_atom])
+ h2_idx = np.array([ATOM_ORDER_WITH_H[h2s] for h2s in h2_atom])
+
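+                # distances between every pair of equivalent hydrogens; atoms
+                # missing from the structure get the 999.0 sentinel so they
+                # can never win the min below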
+ mask = mask1[None, :] * mask2[:, None]
+ h_dists = np.sqrt((np.square(pos1[h1_idx, :][None] - pos2[h2_idx, :][:, None])).sum(-1) + 1e-8)
+
+ h_dists = h_dists * mask[h2_idx][:, h1_idx]
+ h_dists[mask[h2_idx][:, h1_idx] == 0] = 999.0
+ h_dist = min(np.min(h_dists), h_dist)
+
+ if h_dist < distance_threshold:
+ ur_tuple_new.append((i, j))
+
+ hdist_all.append(h_dist)
+ except Exception as _:
+ continue
+
+ if not restraints:
+        num_restraints, good_num, good_rate, ur_per_res = 0, 0, 1, 0
+ else:
+ hdist_all = np.array(hdist_all)
+ ok_num = np.sum(hdist_all < distance_threshold)
+ num_restraints = len(hdist_all)
+ good_num = ok_num
+ good_rate = round(ok_num / len(hdist_all), 4)
+ ur_per_res = round(len(hdist_all) / aatype_all.shape[0], 2)
+ if return_new_restraints:
+ stats = num_restraints, good_num, aatype_all.shape[0], hdist_all, good_rate, ur_per_res, ur_tuple_new
+ else:
+ stats = num_restraints, good_num, aatype_all.shape[0], hdist_all, good_rate, ur_per_res
+ return stats
+
+
+def replace_q_in_atype(atype):
+ '''replace_q_in_atype'''
+ if "Q" in atype:
+ atype = "H" + atype[1:] + "*"
+
+ return atype
+
+
+def remove_duplicates(restraints):
+ '''remove_duplicates'''
+ output_restraints = []
+ for i, atype1, j, atype2 in restraints:
+ if i > j:
+ i, atype1, j, atype2 = j, atype2, i, atype1
+
+ output_restraints.append([i, atype1, j, atype2])
+
+ output_restraints = list(set([tuple(t) for t in output_restraints]))
+ return output_restraints
+
+
+def preprocess_restraints(restraints, num_gap=0, index_distance_threshold=-1):
+ '''preprocess_restraints'''
+ restraints = remove_duplicates(restraints)
+ restraints_new = []
+ for i, atype1, j, atype2 in restraints:
+ atype1 = replace_q_in_atype(atype1)
+ atype2 = replace_q_in_atype(atype2)
+ i = i - num_gap
+ j = j - num_gap
+ if i < 1 or j < 1 or atype1 not in ATOM_TYPES_WITH_H or atype2 not in ATOM_TYPES_WITH_H:
+ continue
+ if abs(i - j) >= index_distance_threshold:
+ restraints_new.append([[i, atype1], [[j, atype2]]])
+ return restraints_new
+
+
+def confidence(filename):
+ '''confidence'''
+ with open(filename, "r") as f:
+ content = f.readlines()
+ confidences = []
+ for line in content:
+ words = line.split()
+ if len(words) > 3 and words[2] == "CA":
+ confidences.append(float(words[-2]))
+ avg_conf = sum(confidences) / len(confidences)
+ return round(avg_conf, 3)
+
+
+def select_pdb_by_conf(local_pdb_paths, return_conf=False):
+ '''select_pdb_by_conf'''
+ output_path_name = None
+ max_conf = -100
+ confs_all = []
+ for name in local_pdb_paths:
+ conf = confidence(name)
+ if conf > max_conf:
+ output_path_name = name
+ max_conf = conf
+ confs_all.append(conf)
+ if return_conf:
+ return output_path_name, np.max(confs_all), np.median(confs_all)
+ return output_path_name
+
+
+def gtur_vs_gtpdb(gtur_path, gtpdb_path, gap_nums=None, filter_names=None):
+ '''gtur_vs_gtpdb'''
+ prot_names = os.listdir(gtur_path)
+ prot_names = [name.split(".")[0] for name in prot_names]
+ if gap_nums:
+ prot_names = list(set(prot_names).intersection(set(list(gap_nums.keys()))))
+ if filter_names:
+ prot_names = list(set(prot_names).intersection(set(list(filter_names))))
+ prot_names.sort()
+ outputs = []
+ for prot_name in prot_names:
+
+ if gap_nums and prot_name in gap_nums:
+ num_gap = gap_nums[prot_name]
+ else:
+ num_gap = 0
+ local_pdb_path = os.path.join(gtpdb_path, prot_name, prot_name + ".pdb")
+ with open(os.path.join(gtur_path, f"{prot_name}.pkl"), "rb") as f:
+ restraints = pickle.load(f)
+
+ restraints_new = preprocess_restraints(restraints, num_gap=num_gap, index_distance_threshold=0)
+ stats_0 = check_restraints(local_pdb_path, restraints_new, 6)
+
+ restraints_new = preprocess_restraints(restraints, num_gap=num_gap, index_distance_threshold=4)
+ stats_4 = check_restraints(local_pdb_path, restraints_new, 6)
+
+ outputs.append([prot_name] + list(stats_0) + list(stats_4))
+
+ return np.array(outputs)
+
+
+def predur_vs_gtpdb(predur_path, gtpdb_path, filter_names=None):
+ '''predur_vs_gtpdb'''
+ prot_names = os.listdir(predur_path)
+ prot_names = [name.split(".")[0] for name in prot_names]
+ if filter_names:
+ prot_names = list(set(prot_names).intersection(set(list(filter_names))))
+ prot_names.sort()
+
+ outputs = []
+ for prot_name in prot_names:
+ if prot_name in ["2K0M"]:
+ continue
+
+ local_pdb_path = os.path.join(gtpdb_path, prot_name, prot_name + ".pdb")
+
+ local_ur_path = os.path.join(predur_path, prot_name + ".pkl")
+
+ with open(local_ur_path, "rb") as f:
+ restraints_ori = pickle.load(f)
+
+ restraints = []
+ restraints_ori.sort()
+
+ for res1, candidates in restraints_ori:
+ i, atype1 = res1
+ for res2 in candidates:
+ j, atype2 = res2
+ restraints.append([i, atype1, j, atype2])
+
+ restraints_new = preprocess_restraints(restraints, index_distance_threshold=0)
+ stats_0 = check_restraints(local_pdb_path, restraints_new, 6)
+
+ restraints_new = preprocess_restraints(restraints, index_distance_threshold=4)
+ stats_4 = check_restraints(local_pdb_path, restraints_new, 6)
+
+ outputs.append([prot_name] + list(stats_0) + list(stats_4))
+
+ return np.array(outputs)
+
+
+def gtur_vs_predpdb(gtur_path, predpdb_path, gap_nums=None, filter_names=None):
+ '''gtur_vs_predpdb'''
+ ur_path = gtur_path
+ pdb_path = predpdb_path
+
+ all_pdb_names = os.listdir(pdb_path)
+ pdb_names_dict = {}
+ for pdb_name in all_pdb_names:
+ short_name = pdb_name.split("_")[0]
+ pdb_full_path = os.path.join(pdb_path, pdb_name)
+ pdb_names_dict[short_name] = pdb_names_dict.get(short_name, []) + [pdb_full_path]
+
+ prot_names = os.listdir(ur_path)
+ prot_names = [name.split(".")[0] for name in prot_names]
+
+ prot_names = list(set(prot_names).intersection(set(list(pdb_names_dict.keys()))))
+ if gap_nums:
+ prot_names = list(set(prot_names).intersection(set(list(gap_nums.keys()))))
+ if filter_names:
+ prot_names = list(set(prot_names).intersection(set(list(filter_names))))
+
+ prot_names.sort()
+
+ outputs = []
+ for prot_name in prot_names:
+
+ if gap_nums and prot_name in gap_nums:
+ num_gap = gap_nums[prot_name]
+ else:
+ num_gap = 0
+
+ local_pdb_path, conf_max, conf_median = select_pdb_by_conf(pdb_names_dict.get(prot_name), return_conf=True)
+
+ with open(os.path.join(ur_path, f"{prot_name}.pkl"), "rb") as f:
+ restraints = pickle.load(f)
+
+ restraints_new = preprocess_restraints(restraints, num_gap=num_gap, index_distance_threshold=0)
+ stats_0 = check_restraints(local_pdb_path, restraints_new, 6)
+
+ restraints_new = preprocess_restraints(restraints, num_gap=num_gap, index_distance_threshold=4)
+ stats_4 = check_restraints(local_pdb_path, restraints_new, 6)
+ outputs.append([prot_name] + list(stats_0) + list(stats_4) + [conf_max, conf_median])
+
+ return np.array(outputs)
+
+
+def predur_vs_predpdb(predur_path, predpdb_path, filter_names=None, return_conf=False):
+ '''predur_vs_predpdb'''
+ ur_path = predur_path
+ pdb_path = predpdb_path
+
+ all_pdb_names = os.listdir(pdb_path)
+ print(all_pdb_names)
+ pdb_names_dict = {}
+ for pdb_name in all_pdb_names:
+ short_name = pdb_name.split("_")[0]
+ pdb_full_path = os.path.join(pdb_path, pdb_name)
+ pdb_names_dict[short_name] = pdb_names_dict.get(short_name, []) + [pdb_full_path]
+
+ prot_names = os.listdir(ur_path)
+ prot_names = [name.split(".")[0] for name in prot_names]
+ print("assign=", prot_names, filter_names, pdb_names_dict.keys())
+ if filter_names:
+ prot_names = list(set(prot_names).intersection(set(list(filter_names))))
+ prot_names = list(set(prot_names).intersection(set(list(pdb_names_dict.keys()))))
+ prot_names.sort()
+
+ outputs = []
+ conf_all = {}
+ print("after===", prot_names)
+ for prot_name in prot_names:
+ print(prot_name)
+
+ local_pdb_path, _, conf_median = select_pdb_by_conf(pdb_names_dict.get(prot_name), return_conf=True)
+ local_ur_path = os.path.join(ur_path, prot_name + ".pkl")
+ with open(local_ur_path, "rb") as f:
+ restraints_ori = pickle.load(f)
+
+ restraints = []
+ restraints_ori.sort()
+ for res1, candidates in restraints_ori:
+ i, atype1 = res1
+ for res2 in candidates:
+ j, atype2 = res2
+ restraints.append([i, atype1, j, atype2])
+
+ restraints_new = preprocess_restraints(restraints, index_distance_threshold=0)
+ stats_0 = check_restraints(local_pdb_path, restraints_new, 6)
+ restraints_new = preprocess_restraints(restraints, index_distance_threshold=4)
+ stats_4 = check_restraints(local_pdb_path, restraints_new, 6)
+ outputs.append([prot_name] + list(stats_0) + list(stats_4))
+ conf_all[prot_name] = conf_median
+ if return_conf:
+ return np.array(outputs), conf_all
+ return np.array(outputs)
+
+
+def filter_ur_with_pdb(restraints, pdb_path, distance_threshold=12, pdb_align=1):
+ '''filter_ur_with_pdb'''
+ restraints_new = []
+ for res1, candidates in restraints:
+ i, atype1 = res1
+ for res2 in candidates:
+ j, atype2 = res2
+ restraints_new.append([i, atype1, j, atype2])
+
+ restraints_new = preprocess_restraints(restraints_new, 0)
+ stats = check_restraints(pdb_path, restraints_new, distance_threshold, pdb_align, return_new_restraints=True)
+ ur_tuple_filtered = stats[-1]
+
+ return ur_tuple_filtered
diff --git a/MindSPONGE/applications/research/FAAST/commons/nmr_hydrogen_equivariance.txt b/MindSPONGE/applications/research/FAAST/commons/nmr_hydrogen_equivariance.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e351549cb85581ed808f4a2b0466951d00943968
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/commons/nmr_hydrogen_equivariance.txt
@@ -0,0 +1,102 @@
+A N N H,HN
+A C CA HA
+A C CB HB1,HB2,HB3,HB*
+C N N H,HN
+C C CA HA
+C C CB HB1,HB2,HB3,HB*
+C S SG HG,HSG
+D N N H,HN
+D C CA HA
+D C CB HB1,HB2,HB3,HB*
+D O OD1,OD2 HD2
+E N N H,HN
+E C CA HA
+E C CB HB1,HB2,HB3,HB*
+E C CG HG1,HG2,HG3,HG*
+E O OE1,OE2 HE2
+F N N H,HN
+F C CA HA
+F C CB HB1,HB2,HB3,HB*
+F C CD1,CD2 HD1,HD2,HD*,HD2*,HD1*
+F C CE1,CE2 HE1,HE2,HE*
+F C CZ HZ
+G N N H,HN
+G C CA HA1,HA2,HA3,HA*
+H N N H,HN
+H N ND1 HD1,HND1
+H N NE2 HE2,HNE2
+H C CA HA
+H C CB HB1,HB2,HB3,HB*
+H C CD2 HD2
+H C CE1 HE1
+I N N H,HN
+I C CA HA
+I C CB HB
+I C CG2 HG21,HG22,HG23,HG2*
+I C CG1 HG11,HG12,HG13,HG1*
+I C CD1 HD11,HD12,HD13,HD1*,HD**
+K N N H,HN
+K N NZ HZ1,HZ2,HZ3,HNZ1,HNZ2,HNZ3,HZ*,HNZ*
+K C CA HA
+K C CB HB1,HB2,HB3,HB*
+K C CG HG1,HG2,HG3,HG*
+K C CD HD1,HD2,HD3,HD*
+K C CE HE1,HE2,HE3,HE*
+L N N H,HN
+L C CA HA
+L C CB HB1,HB2,HB3,HB*
+L C CG HG
+L C CD1,CD2 HD11,HD12,HD13,HD1*,HD21,HD22,HD23,HD2*,HD**,HD*
+M N N H,HN
+M C CA HA
+M C CB HB1,HB2,HB3,HB*
+M C CG HG1,HG2,HG3,HG*
+M C CE HE1,HE2,HE3,HE*
+N N N H,HN
+N N ND2 HD21,HD22,HN21,HN22,HD2*
+N C CA HA
+N C CB HB1,HB2,HB3,HB*
+P C CA HA
+P C CB HB1,HB2,HB3,HB*
+P C CG HG1,HG2,HG3,HG*
+P C CD HD1,HD2,HD3,HD*
+Q N N H,HN
+Q N NE2 HE21,HE22,HE2*,HE*
+Q C CA HA
+Q C CB HB1,HB2,HB3,HB*
+Q C CG HG1,HG2,HG3,HG*
+R N N H,HN
+R N NE HE,HNE
+R N NH1,NH2 HH11,HH12,HH1*,HH21,HH22,HH2*,HH**
+R C CA HA
+R C CB HB1,HB2,HB3,HB*
+R C CG HG1,HG2,HG3,HG*
+R C CD HD1,HD2,HD3,HD*
+S N N H,HN
+S C CA HA
+S C CB HB1,HB2,HB3,HB*
+S O OG HG,HOG
+T N N H,HN
+T C CA HA
+T C CB HB
+T C CG2 HG21,HG22,HG23,HG2*,HG*
+T O OG1 HG1
+V N N H,HN
+V C CA HA
+V C CB HB
+V C CG1,CG2 HG11,HG12,HG13,HG1*,HG21,HG22,HG23,HG2*,HG**,HG*
+W N N H,HN
+W N NE1 HE1,HNE1
+W C CA HA
+W C CB HB1,HB2,HB3,HB*
+W C CD1 HD1
+W C CE3 HE3
+W C CZ2 HZ2
+W C CZ3 HZ3
+W C CH2 HH2
+Y N N H,HN
+Y C CA HA
+Y C CB HB1,HB2,HB3,HB*
+Y C CD1,CD2 HD1,HD2,HD*
+Y C CE1,CE2 HE1,HE2,HE*
+Y O OH HH,HOH
diff --git a/MindSPONGE/applications/research/FAAST/commons/res_constants.py b/MindSPONGE/applications/research/FAAST/commons/res_constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..c06dab4962c7f9560f5749c3fede1ac261702dca
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/commons/res_constants.py
@@ -0,0 +1,180 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"res_constants"
+import numpy as np
+
+with open('./commons/nmr_hydrogen_equivariance.txt', 'r') as f:
+ lines = f.readlines()
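+
+# Each line of the equivalence table holds four whitespace-separated fields:
+# 1-letter residue type, heavy-atom element, comma-separated heavy-atom
+# name(s), and the comma-separated names of the equivalent protons.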
+EQ_GROUPS = []
+for line in lines:
+ lsp = line.split()
+ eqg = [lsp[0], lsp[1], set(lsp[2].split(',')), set(lsp[3].split(','))]
+ EQ_GROUPS.append(eqg)
+EQ_GROUPS = np.array(EQ_GROUPS)
+
+restype_1to3 = {
+ 'A': 'ALA',
+ 'R': 'ARG',
+ 'N': 'ASN',
+ 'D': 'ASP',
+ 'C': 'CYS',
+ 'Q': 'GLN',
+ 'E': 'GLU',
+ 'G': 'GLY',
+ 'H': 'HIS',
+ 'I': 'ILE',
+ 'L': 'LEU',
+ 'K': 'LYS',
+ 'M': 'MET',
+ 'F': 'PHE',
+ 'P': 'PRO',
+ 'S': 'SER',
+ 'T': 'THR',
+ 'W': 'TRP',
+ 'Y': 'TYR',
+ 'V': 'VAL',
+}
+
+AA_3TO1 = {val: key for key, val in restype_1to3.items()}
+
+atom_types = []
+EQUI_VARIANCE = {}
+for line in lines:
+ words = line.split()
+ if len(words) < 4 or words[0] == "##":
+ continue
+ aatype, hetero_atype, hetero_aname, equivariance = words
+
+ equivariance = equivariance.split(",")
+ aatype = restype_1to3.get(aatype)
+
+ atom_types.extend(equivariance)
+
+ if aatype not in EQUI_VARIANCE.keys():
+ EQUI_VARIANCE[aatype] = {}
+
+ for hname in equivariance:
+ EQUI_VARIANCE.get(aatype)[hname] = {
+ "equivariance": equivariance,
+ "hetero_info": [hetero_atype, hetero_aname]
+ }
+
+atom_template = {'name': 'HA',
+ 'res': 'MET10',
+ 'atom_id': 169,
+ 'segid': ' ',
+ 'hetero_name': 'CA',
+ 'type': 'H'}
+
+peak_template = {'distance': None,
+ 'peak_id': 2271,
+ 'upper_bound': None,
+ 'lower_bond': None,
+ 'weight': 1.0,
+ 'active': 1,
+ 'merged': 0,
+ 'ref_peak': {'volume': [21180.0, 0.0], 'intensity': [21180.0, 0.0], 'number': 2, 'ref_id': None,
+ 'proton2ppm': [4.391, None],
+ 'hetero2ppm': [None, None],
+ 'proton1ppm': [8.875, None],
+ 'hetero1ppm': [119.917, None],
+ 'reliable': False,
+ 'proton1assignments': [{'type': 'automatic', 'atoms': [
+ {'name': 'H', 'res': 'MET10', 'atom_id': 168, 'segid': ' ', 'hetero_name': 'N',
+ 'type': 'H'}]}],
+ 'hetero1assignments': [{'type': 'automatic', 'atoms': [
+ {'name': 'N', 'res': 'MET10', 'atom_id': 177, 'segid': ' ', 'hetero_name': None,
+ 'type': 'N'}]}],
+ 'hetero2assignments': [],
+ 'proton2assignments': [
+ {'type': 'automatic', 'atoms': [
+ {'name': 'HA', 'res': 'MET10', 'atom_id': 169, 'segid': ' ',
+ 'hetero_name': 'CA', 'type': 'H'}]},
+ {'type': 'automatic', 'atoms': [
+ {'name': 'HB', 'res': 'THR18', 'atom_id': 308, 'segid': ' ',
+ 'hetero_name': 'CB', 'type': 'H'}]},
+ {'type': 'automatic', 'atoms': [
+ {'name': 'HA', 'res': 'VAL37', 'atom_id': 628, 'segid': ' ',
+ 'hetero_name': 'CA', 'type': 'H'}]},
+ {'type': 'automatic', 'atoms': [
+ {'name': 'HA', 'res': 'ASP57', 'atom_id': 940, 'segid': ' ',
+ 'hetero_name': 'CA', 'type': 'H'}]},
+ {'type': 'automatic', 'atoms': [
+ {'name': 'HA', 'res': 'ASP83', 'atom_id': 1373, 'segid': ' ',
+ 'hetero_name': 'CA', 'type': 'H'}]}],
+ },
+
+ 'analysis': {'average_distance': [None, None],
+ 'lower_bound_violation': [None, None],
+ 'is_violated': None,
+ 'degree_of_violation': None,
+ 'figure_of_merit': [None, None],
+ 'model_peak_size': [None, None],
+ 'upper_bound_violation': [None, None],
+ 'contributions': [{'figure_of_merit': None,
+ 'weight': None,
+ 'average_distance': [None, None],
+ 'contribution_id': 17174,
+ 'spin_pairs': [{'id': 19893,
+ 'Atom2': {'name': 'HA', 'res': 'MET10', 'atom_id': 169,
+ 'segid': ' ', 'hetero_name': 'CA',
+ 'type': 'H'},
+ 'Atom1': {'name': 'H', 'res': 'MET10', 'atom_id': 168,
+ 'segid': ' ', 'hetero_name': 'N',
+ 'type': 'H'}}],
+ 'type': 'fast_exchange'},
+ {'figure_of_merit': None,
+ 'weight': None,
+ 'average_distance': [None, None],
+ 'contribution_id': 17175,
+ 'spin_pairs': [{'id': 19894,
+ 'Atom2': {'name': 'HB', 'res': 'THR18', 'atom_id': 308,
+ 'segid': ' ', 'hetero_name': 'CB',
+ 'type': 'H'},
+ 'Atom1': {'name': 'H', 'res': 'MET10', 'atom_id': 168,
+ 'segid': ' ', 'hetero_name': 'N',
+ 'type': 'H'}}],
+ 'type': 'fast_exchange'},
+ {'figure_of_merit': None, 'weight': None,
+ 'average_distance': [None, None], 'contribution_id': 17176,
+ 'spin_pairs': [{'id': 19895,
+ 'Atom2': {'name': 'HA', 'res': 'VAL37', 'atom_id': 628,
+ 'segid': ' ', 'hetero_name': 'CA',
+ 'type': 'H'},
+ 'Atom1': {'name': 'H', 'res': 'MET10', 'atom_id': 168,
+ 'segid': ' ', 'hetero_name': 'N',
+ 'type': 'H'}}], 'type': 'fast_exchange'},
+ {'figure_of_merit': None, 'weight': None,
+ 'average_distance': [None, None], 'contribution_id': 17177,
+ 'spin_pairs': [{'id': 19896,
+ 'Atom2': {'name': 'HA', 'res': 'ASP57', 'atom_id': 940,
+ 'segid': ' ', 'hetero_name': 'CA',
+ 'type': 'H'},
+ 'Atom1': {'name': 'H', 'res': 'MET10', 'atom_id': 168,
+ 'segid': ' ', 'hetero_name': 'N',
+ 'type': 'H'}}], 'type': 'fast_exchange'},
+ {'figure_of_merit': None, 'weight': None,
+ 'average_distance': [None, None], 'contribution_id': 17178,
+ 'spin_pairs': [{'id': 19897, 'Atom2': {'name': 'HA', 'res': 'ASP83',
+ 'atom_id': 1373,
+ 'segid': ' ',
+ 'hetero_name': 'CA',
+ 'type': 'H'},
+ 'Atom1': {'name': 'H', 'res': 'MET10', 'atom_id': 168,
+ 'segid': ' ', 'hetero_name': 'N',
+ 'type': 'H'}}], 'type': 'fast_exchange'}],
+ },
+
+ }
diff --git a/MindSPONGE/applications/research/FAAST/config/data.yaml b/MindSPONGE/applications/research/FAAST/config/data.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f575c1931feb7f911c14058b0af83c071a7c68f9
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/config/data.yaml
@@ -0,0 +1,88 @@
+block_deletion:
+ msa_fraction_per_block: 0.3
+ num_blocks: 5
+ randomize_num_blocks: True
+common:
+ random_recycle: False
+ distillation: False
+ replace_proportion: 0.0
+ masked_msa:
+ use_masked_msa: True
+ profile_prob: 0.1
+ same_prob: 0.1
+ uniform_prob: 0.1
+ max_extra_msa: 512
+ msa_cluster_features: True
+ num_recycle: 4
+ reduce_msa_clusters_by_max_templates: True
+ resample_msa_in_recycling: True
+ use_templates: True
+ template_features:
+ - template_all_atom_positions
+ - template_sum_probs
+ - template_aatype
+ - template_all_atom_masks
+ - template_domain_names
+ unsupervised_features:
+ - aatype
+ - residue_index
+ - sequence
+ - msa
+ - domain_name
+ - num_alignments
+ - seq_length
+ - between_segment_residues
+ - deletion_matrix
+ - template_all_atom_positions
+ - template_sum_probs
+ - template_aatype
+ - template_all_atom_masks
+ - template_domain_names
+ supervised_features:
+ - all_atom_positions
+ - all_atom_mask
+ - atom14_atom_exists
+ - atom14_gt_exists
+ - atom14_gt_positions
+ - residx_atom14_to_atom37
+ - residx_atom37_to_atom14
+ - atom37_atom_exists
+ - atom14_alt_gt_positions
+ - atom14_alt_gt_exists
+ - atom14_atom_is_ambiguous
+ - rigidgroups_gt_frames
+ - rigidgroups_gt_exists
+ - rigidgroups_group_exists
+ - rigidgroups_group_is_ambiguous
+ - rigidgroups_alt_gt_frames
+ - backbone_affine_tensor
+ - torsion_angles_sin_cos
+    - alt_torsion_angles_sin_cos
+ - torsion_angles_mask
+ - pseudo_beta
+ - pseudo_beta_mask
+ - chi_mask
+ - backbone_affine_mask
+
+
+eval:
+ crop_size: 256
+ fixed_size: True
+ masked_msa_replace_fraction: 0.15
+ max_msa_clusters: 512
+ max_templates: 4
+ num_ensemble: 1
+ subsample_templates: True
+ keep_extra: True
+
+database_search:
+ hhsearch_binary_path: ./hh-suite/build/bin/hhsearch
+ kalign_binary_path: ./bin/kalign
+ pdb70_database_path: None
+ mmcif_dir: None
+ obsolete_pdbs_path: None
+ max_template_date: "2100-01-01"
+ mmseqs_binary: None
+ uniref30_path: None
+ database_envdb_dir: None
+ a3m_result_path: "./a3m_result/"
diff --git a/MindSPONGE/applications/research/FAAST/config/model.yaml b/MindSPONGE/applications/research/FAAST/config/model.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..01b95f9aca317466bc98ac6f20359bb335ac13cb
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/config/model.yaml
@@ -0,0 +1,678 @@
+is_training: False
+msa_channel: 256
+pair_channel: 128
+extra_msa_channel: 64
+max_relative_feature: 32
+recycle_features: True
+recycle_pos: True
+seq_channel: 384
+ascend:
+ lr: 0.0001
+GPU:
+ lr_max: 0.001
+ lr_min: 0.0001
+ warmup_steps: 1000
+ start_step: 0
+ lr_decay_steps: 75000
+prev_pos:
+ min_bin: 3.25
+ max_bin: 20.75
+ num_bins: 15
+common:
+ target_feat_dim: 22
+ msa_feat_dim: 49
+ dgram_dim: 15
+ pair_in_dim: 65
+ msa_first_row_dim: 256
+ prev_pair_dim: 128
+ extra_msa_dim: 25
+ template_feat_dim: 57
+template:
+ enabled: True
+ embed_torsion_angles: True
+ use_template_unit_vector: True
+ attention:
+ gating: False
+ key_dim: 64
+ num_head: 4
+ value_dim: 64
+ dgram_features:
+ min_bin: 3.25
+ max_bin: 50.75
+ num_bins: 39
+ template_pair_stack:
+ num_block: 2
+ triangle_attention_starting_node:
+ dropout_rate: 0.25
+ gating: True
+ key_dim: 64
+ num_head: 4
+ orientation: 'per_row'
+ shared_dropout: True
+ value_dim: 64
+ triangle_attention_ending_node:
+ dropout_rate: 0.25
+ gating: True
+ key_dim: 64
+ num_head: 4
+ orientation: 'per_column'
+ shared_dropout: True
+ value_dim: 64
+ triangle_multiplication_outgoing:
+ dropout_rate: 0.25
+ equation: 'ikc,jkc->ijc'
+ num_intermediate_channel: 64
+ orientation: 'per_row'
+ shared_dropout: True
+ triangle_multiplication_incoming:
+ dropout_rate: 0.25
+ equation: 'kjc,kic->ijc'
+ num_intermediate_channel: 64
+ orientation: 'per_row'
+ shared_dropout: True
+ pair_transition:
+ dropout_rate: 0.0
+ num_intermediate_factor: 2
+ orientation: 'per_row'
+ shared_dropout: True
+evoformer:
+ msa_stack_num: 48
+ extra_msa_stack_num: 4
+ msa_stack_num_assessment: 8
+ extra_msa_stack_num_assessment: 4
+ msa_row_attention_with_pair_bias:
+ dropout_rate: 0.15 # 0.15
+ gating: True
+ num_head: 8
+ orientation: 'per_row'
+ shared_dropout: True
+ msa_column_attention:
+ dropout_rate: 0.0
+ gating: True
+ num_head: 8
+ orientation: 'per_column'
+ shared_dropout: True
+ msa_transition:
+ dropout_rate: 0.0
+ num_intermediate_factor: 4
+ orientation: 'per_row'
+ shared_dropout: True
+ outer_product_mean:
+ chunk_size: 128
+ dropout_rate: 0.0
+ num_outer_channel: 32
+ orientation: 'per_row'
+ shared_dropout: True
+ triangle_attention_starting_node:
+ dropout_rate: 0.25 # 0.25
+ gating: True
+ num_head: 4
+ orientation: 'per_row'
+ shared_dropout: True
+ triangle_attention_ending_node:
+ dropout_rate: 0.25 # 0.25
+ gating: True
+ num_head: 4
+ orientation: 'per_column'
+ shared_dropout: True
+ triangle_multiplication_outgoing:
+ dropout_rate: 0.25 # 0.25
+ equation: 'ikc,jkc->ijc'
+ num_intermediate_channel: 128
+ orientation: 'per_row'
+ shared_dropout: True
+ triangle_multiplication_incoming:
+ dropout_rate: 0.25 # 0.25
+ equation: 'kjc,kic->ijc'
+ num_intermediate_channel: 128
+ orientation: 'per_row'
+ shared_dropout: True
+ pair_transition:
+ dropout_rate: 0.0
+ num_intermediate_factor: 4
+ orientation: 'per_row'
+ shared_dropout: True
+structure_module:
+ num_layer: 8
+ fape:
+ clamp_distance: 10.0
+ clamp_type: 'relu'
+ loss_unit_distance: 10.0
+ angle_norm_weight: 0.01
+ chi_weight: 0.5
+ clash_overlap_tolerance: 1.5
+ compute_in_graph_metrics: True
+ dropout: 0.1
+ num_channel: 384
+ num_head: 12
+ num_layer_in_transition: 3
+ num_point_qk: 4
+ num_point_v: 8
+ num_scalar_qk: 16
+ num_scalar_v: 16
+ position_scale: 10.0
+ sidechain:
+ atom_clamp_distance: 10.0
+ num_channel: 128
+ num_residual_block: 2
+ weight_frac: 0.5
+ length_scale: 10.
+ structural_violation_loss_weight: 1.0
+ violation_tolerance_factor: 12.0
+ weight: 1.0
+slice:
+ seq_128:
+ template_embedding: 0
+ template_pair_stack:
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ extra_msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 4
+ msa_column_global_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 0
+ msa_column_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ seq_256:
+ template_embedding: 0
+ template_pair_stack:
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ extra_msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 4
+ msa_column_global_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 0
+ msa_column_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ seq_384:
+ template_embedding: 0
+ template_pair_stack:
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ extra_msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 0
+ msa_column_global_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 0
+ msa_column_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ seq_512:
+ template_embedding: 0
+ template_pair_stack:
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ extra_msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 64
+ msa_column_global_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 0
+ msa_column_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ seq_768:
+ template_embedding: 8
+ template_pair_stack:
+ triangle_attention_starting_node: 8
+ triangle_attention_ending_node: 8
+ pair_transition: 8
+ extra_msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 128
+ msa_column_global_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 0
+ msa_column_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ seq_1024:
+ template_embedding: 8 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 8 # seq len
+ triangle_attention_ending_node: 8 # seq len
+ pair_transition: 8 # seq len
+ extra_msa_stack:
+ msa_transition: 0 # 5120
+ msa_row_attention_with_pair_bias: 128 # 5120
+ msa_column_global_attention: 8 # seq len
+ outer_product_mean: 0 # seq len
+ triangle_attention_starting_node: 8 # seq len
+ triangle_attention_ending_node: 8 # seq len
+ pair_transition: 0 # seq len
+ msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 8
+ msa_column_attention: 8
+ outer_product_mean: 0
+ triangle_attention_starting_node: 8
+ triangle_attention_ending_node: 8
+ pair_transition: 0
+ seq_1280:
+ template_embedding: 8 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 32 # seq len
+ triangle_attention_ending_node: 32 # seq len
+ pair_transition: 8 # seq len
+ extra_msa_stack:
+ msa_transition: 0 # 5120
+ msa_row_attention_with_pair_bias: 128 # 5120
+ msa_column_global_attention: 8 # seq len
+ outer_product_mean: 0 # seq len
+ triangle_attention_starting_node: 8 # seq len
+ triangle_attention_ending_node: 8 # seq len
+ pair_transition: 0 # seq len
+ msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 8
+ msa_column_attention: 8
+ outer_product_mean: 0
+ triangle_attention_starting_node: 8
+ triangle_attention_ending_node: 8
+ pair_transition: 0
+ seq_1536:
+ template_embedding: 16 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 32 # seq len
+ triangle_attention_ending_node: 32 # seq len
+ pair_transition: 8 # seq len
+ extra_msa_stack:
+ msa_transition: 8 # 5120
+ msa_row_attention_with_pair_bias: 256 # 5120
+ msa_column_global_attention: 32 # seq len
+ outer_product_mean: 8 # seq len
+ triangle_attention_starting_node: 32 # seq len
+ triangle_attention_ending_node: 32 # seq len
+ pair_transition: 8 # seq len
+ msa_stack:
+ msa_transition: 8
+ msa_row_attention_with_pair_bias: 32
+ msa_column_attention: 32
+ outer_product_mean: 8
+ triangle_attention_starting_node: 32
+ triangle_attention_ending_node: 32
+ pair_transition: 8
+ seq_1792:
+ template_embedding: 64 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 64 # seq len
+ triangle_attention_ending_node: 64 # seq len
+ pair_transition: 8 # seq len
+ extra_msa_stack:
+ msa_transition: 8 # 5120
+ msa_row_attention_with_pair_bias: 512 # 5120
+ msa_column_global_attention: 64 # seq len
+ outer_product_mean: 8 # seq len
+ triangle_attention_starting_node: 64 # seq len
+ triangle_attention_ending_node: 64 # seq len
+ pair_transition: 8 # seq len
+ msa_stack:
+ msa_transition: 8
+ msa_row_attention_with_pair_bias: 64
+ msa_column_attention: 64
+ outer_product_mean: 8
+ triangle_attention_starting_node: 64
+ triangle_attention_ending_node: 64
+ pair_transition: 8
+ seq_2048:
+ template_embedding: 128 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 128 # seq len
+ triangle_attention_ending_node: 128 # seq len
+ pair_transition: 128 # seq len
+ extra_msa_stack:
+ msa_transition: 128 # 5120
+ msa_row_attention_with_pair_bias: 512 # 5120
+ msa_column_global_attention: 128 # seq len
+ outer_product_mean: 128 # seq len
+ triangle_attention_starting_node: 128 # seq len
+ triangle_attention_ending_node: 128 # seq len
+ pair_transition: 128 # seq len
+ msa_stack:
+ msa_transition: 128
+ msa_row_attention_with_pair_bias: 128
+ msa_column_attention: 128
+ outer_product_mean: 128
+ triangle_attention_starting_node: 128
+ triangle_attention_ending_node: 128
+ pair_transition: 128
+ seq_2304:
+ template_embedding: 128 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 256 # seq len
+ triangle_attention_ending_node: 256 # seq len
+ pair_transition: 128 # seq len
+ extra_msa_stack:
+ msa_transition: 128 # 5120
+ msa_row_attention_with_pair_bias: 512 # 5120
+ msa_column_global_attention: 256 # seq len
+ outer_product_mean: 128 # seq len
+ triangle_attention_starting_node: 256 # seq len
+ triangle_attention_ending_node: 256 # seq len
+ pair_transition: 128 # seq len
+ msa_stack:
+ msa_transition: 128
+ msa_row_attention_with_pair_bias: 256
+ msa_column_attention: 256
+ outer_product_mean: 256
+ triangle_attention_starting_node: 256
+ triangle_attention_ending_node: 256
+ pair_transition: 128
+slice_new:
+ seq_128:
+ template_embedding: 0
+ template_pair_stack:
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ extra_msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 4
+ msa_column_global_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 0
+ msa_column_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ seq_256:
+ template_embedding: 0
+ template_pair_stack:
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ extra_msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 64
+ msa_column_global_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 0
+ msa_column_attention: 0
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ seq_384:
+ template_embedding: 8
+ template_pair_stack:
+ triangle_attention_starting_node: 8
+ triangle_attention_ending_node: 8
+ pair_transition: 8
+ extra_msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 128
+ msa_column_global_attention: 8
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 8
+ msa_column_attention: 8
+ outer_product_mean: 0
+ triangle_attention_starting_node: 8
+ triangle_attention_ending_node: 8
+ pair_transition: 0
+ seq_512:
+ template_embedding: 8
+ template_pair_stack:
+ triangle_attention_starting_node: 8
+ triangle_attention_ending_node: 8
+ pair_transition: 8
+ extra_msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 128
+ msa_column_global_attention: 8
+ outer_product_mean: 0
+ triangle_attention_starting_node: 0
+ triangle_attention_ending_node: 0
+ pair_transition: 0
+ msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 8
+ msa_column_attention: 8
+ outer_product_mean: 0
+ triangle_attention_starting_node: 8
+ triangle_attention_ending_node: 8
+ pair_transition: 0
+ seq_768:
+ template_embedding: 64 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 64 # seq len
+ triangle_attention_ending_node: 64 # seq len
+ pair_transition: 32 # seq len
+ extra_msa_stack:
+ msa_transition: 16 # 5120
+ msa_row_attention_with_pair_bias: 512 # 5120
+ msa_column_global_attention: 32 # seq len
+ outer_product_mean: 8 # seq len
+ triangle_attention_starting_node: 32 # seq len
+ triangle_attention_ending_node: 32 # seq len
+ pair_transition: 8 # seq len
+ msa_stack:
+ msa_transition: 8
+ msa_row_attention_with_pair_bias: 32
+ msa_column_attention: 32
+ outer_product_mean: 8
+ triangle_attention_starting_node: 32
+ triangle_attention_ending_node: 32
+ pair_transition: 8
+ seq_1024:
+ template_embedding: 64 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 64 # seq len
+ triangle_attention_ending_node: 64 # seq len
+ pair_transition: 32 # seq len
+ extra_msa_stack:
+ msa_transition: 16 # 5120
+ msa_row_attention_with_pair_bias: 512 # 5120
+ msa_column_global_attention: 32 # seq len
+ outer_product_mean: 8 # seq len
+ triangle_attention_starting_node: 32 # seq len
+ triangle_attention_ending_node: 32 # seq len
+ pair_transition: 8 # seq len
+ msa_stack:
+ msa_transition: 8
+ msa_row_attention_with_pair_bias: 32
+ msa_column_attention: 32
+ outer_product_mean: 8
+ triangle_attention_starting_node: 32
+ triangle_attention_ending_node: 32
+ pair_transition: 8
+ seq_1280:
+ template_embedding: 8 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 32 # seq len
+ triangle_attention_ending_node: 32 # seq len
+ pair_transition: 8 # seq len
+ extra_msa_stack:
+ msa_transition: 0 # 5120
+ msa_row_attention_with_pair_bias: 128 # 5120
+ msa_column_global_attention: 8 # seq len
+ outer_product_mean: 0 # seq len
+ triangle_attention_starting_node: 8 # seq len
+ triangle_attention_ending_node: 8 # seq len
+ pair_transition: 0 # seq len
+ msa_stack:
+ msa_transition: 0
+ msa_row_attention_with_pair_bias: 8
+ msa_column_attention: 8
+ outer_product_mean: 0
+ triangle_attention_starting_node: 8
+ triangle_attention_ending_node: 8
+ pair_transition: 0
+ seq_1536:
+ template_embedding: 16 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 32 # seq len
+ triangle_attention_ending_node: 32 # seq len
+ pair_transition: 8 # seq len
+ extra_msa_stack:
+ msa_transition: 8 # 5120
+ msa_row_attention_with_pair_bias: 256 # 5120
+ msa_column_global_attention: 32 # seq len
+ outer_product_mean: 8 # seq len
+ triangle_attention_starting_node: 32 # seq len
+ triangle_attention_ending_node: 32 # seq len
+ pair_transition: 8 # seq len
+ msa_stack:
+ msa_transition: 8
+ msa_row_attention_with_pair_bias: 32
+ msa_column_attention: 32
+ outer_product_mean: 8
+ triangle_attention_starting_node: 32
+ triangle_attention_ending_node: 32
+ pair_transition: 8
+ seq_1792:
+ template_embedding: 64 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 64 # seq len
+ triangle_attention_ending_node: 64 # seq len
+ pair_transition: 8 # seq len
+ extra_msa_stack:
+ msa_transition: 8 # 5120
+ msa_row_attention_with_pair_bias: 512 # 5120
+ msa_column_global_attention: 64 # seq len
+ outer_product_mean: 8 # seq len
+ triangle_attention_starting_node: 64 # seq len
+ triangle_attention_ending_node: 64 # seq len
+ pair_transition: 8 # seq len
+ msa_stack:
+ msa_transition: 8
+ msa_row_attention_with_pair_bias: 64
+ msa_column_attention: 64
+ outer_product_mean: 8
+ triangle_attention_starting_node: 64
+ triangle_attention_ending_node: 64
+ pair_transition: 8
+ seq_2048:
+ template_embedding: 128 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 128 # seq len
+ triangle_attention_ending_node: 128 # seq len
+ pair_transition: 128 # seq len
+ extra_msa_stack:
+ msa_transition: 128 # 5120
+ msa_row_attention_with_pair_bias: 512 # 5120
+ msa_column_global_attention: 128 # seq len
+ outer_product_mean: 128 # seq len
+ triangle_attention_starting_node: 128 # seq len
+ triangle_attention_ending_node: 128 # seq len
+ pair_transition: 128 # seq len
+ msa_stack:
+ msa_transition: 128
+ msa_row_attention_with_pair_bias: 128
+ msa_column_attention: 128
+ outer_product_mean: 128
+ triangle_attention_starting_node: 128
+ triangle_attention_ending_node: 128
+ pair_transition: 128
+ seq_2304:
+ template_embedding: 128 # seq len * seq len
+ template_pair_stack:
+ triangle_attention_starting_node: 256 # seq len
+ triangle_attention_ending_node: 256 # seq len
+ pair_transition: 128 # seq len
+ extra_msa_stack:
+ msa_transition: 128 # 5120
+ msa_row_attention_with_pair_bias: 512 # 5120
+ msa_column_global_attention: 256 # seq len
+ outer_product_mean: 128 # seq len
+ triangle_attention_starting_node: 256 # seq len
+ triangle_attention_ending_node: 256 # seq len
+ pair_transition: 128 # seq len
+ msa_stack:
+ msa_transition: 128
+ msa_row_attention_with_pair_bias: 256
+ msa_column_attention: 256
+ outer_product_mean: 256
+ triangle_attention_starting_node: 256
+ triangle_attention_ending_node: 256
+ pair_transition: 128
+heads:
+ resolution: 1
+ predicted_lddt:
+ filter_by_resolution: True
+ max_resolution: 3.0
+ min_resolution: 0.1
+ num_bins: 50
+ num_channels: 128
+ weight: 0.01
+ distogram:
+ first_break: 2.3125
+ last_break: 21.6875
+ num_bins: 64
+ weight: 0.3
+ masked_msa:
+ num_output: 23
+ weight: 2.0
+ predicted_aligned_error:
+ max_error_bin: 31.0
+ num_bins: 64
+ num_channels: 128
+ filter_by_resolution: True
+ min_resolution: 0.1
+ max_resolution: 3.0
+ weight: 0.0
+ experimentally_resolved:
+ filter_by_resolution: True
+ max_resolution: 3.0
+ min_resolution: 0.1
+ weight: 0.01
diff --git a/MindSPONGE/applications/research/FAAST/data/__init__.py b/MindSPONGE/applications/research/FAAST/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cf35fb739b75d3dbac2f891b39f89d03b879470
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/data/__init__.py
@@ -0,0 +1,19 @@
+# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+'''init'''
+from .preprocess import Feature
+from .protein_feature import RawFeatureGenerator
+from .utils import get_crop_size, get_raw_feature
+from .dataset import create_dataset, process_pdb
diff --git a/MindSPONGE/applications/research/FAAST/data/dataset.py b/MindSPONGE/applications/research/FAAST/data/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c60a78baafbb992a0bdac6e19fdec65930281ae
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/data/dataset.py
@@ -0,0 +1,243 @@
+# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""train dataset"""
+import datetime
+import os
+import pickle
+import time
+import numpy as np
+from mindspore import dataset as ds
+from mindspore.communication import get_rank
+
+from mindsponge.common.residue_constants import make_atom14_dists_bounds, order_restype_with_x
+from mindsponge.common.protein import from_pdb_string
+from mindsponge.common.utils import make_atom14_positions, get_aligned_seq
+from mindsponge.data.data_transform import pseudo_beta_fn, atom37_to_frames, atom37_to_torsion_angles
+from .preprocess import Feature
+
+
+def create_dataset(train_data_dir, raw_feature_dir, names, data_cfg, center_name_path, shuffle=False,
+ num_parallel_worker=4,
+ is_parallel=False, mixed_precision=False):
+ """create train dataset"""
+ column_name = ["target_feat", "msa_feat", "msa_mask", "seq_mask_batch", "aatype_batch",
+ "template_aatype", "template_all_atom_masks",
+ "template_all_atom_positions", "template_mask",
+ "template_pseudo_beta_mask", "template_pseudo_beta", "extra_msa", "extra_has_deletion",
+ "extra_deletion_value", "extra_msa_mask", "residx_atom37_to_atom14",
+ "atom37_atom_exists_batch", "residue_index_batch", "prev_pos",
+ "prev_msa_first_row", "prev_pair", "pseudo_beta_gt",
+ "pseudo_beta_mask_gt", "all_atom_mask_gt",
+ "true_msa", "bert_mask", "residue_index", "seq_mask",
+ "atom37_atom_exists", "aatype", "residx_atom14_to_atom37",
+ "atom14_atom_exists", "backbone_affine_tensor", "backbone_affine_mask",
+ "atom14_gt_positions", "atom14_alt_gt_positions",
+ "atom14_atom_is_ambiguous", "atom14_gt_exists", "atom14_alt_gt_exists",
+ "all_atom_positions", "rigidgroups_gt_frames", "rigidgroups_gt_exists",
+ "rigidgroups_alt_gt_frames", "torsion_angles_sin_cos_gt", "chi_mask", "atomtype_radius",
+ "restype_atom14_bond_lower_bound", "restype_atom14_bond_upper_bound", "use_clamped_fape",
+ "filter_by_solution", "prot_name_index"]
+
+ dataset_generator = DatasetGenerator(train_data_dir, raw_feature_dir, names, data_cfg, center_name_path,
+ mixed_precision)
+ ds.config.set_prefetch_size(1)
+
+ if is_parallel:
+ rank_id = get_rank() % 8
+ rank_size = 8
+ train_dataset = ds.GeneratorDataset(source=dataset_generator, column_names=column_name,
+ num_parallel_workers=num_parallel_worker, shuffle=shuffle,
+ num_shards=rank_size,
+ shard_id=rank_id, max_rowsize=16)
+ else:
+ train_dataset = ds.GeneratorDataset(source=dataset_generator, column_names=column_name,
+ num_parallel_workers=num_parallel_worker, shuffle=shuffle, max_rowsize=16)
+ return train_dataset
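+
+
+# Illustrative usage only; the paths, name list and `data_cfg` below are hypothetical:
+#   names = ["prot_0001", "prot_0002"]
+#   train_ds = create_dataset("./pdb", "./pkl", names, data_cfg,
+#                             center_name_path=resolution_dict, shuffle=True)
+#   for batch in train_ds.create_tuple_iterator():
+#       ...  # columns are returned in the order of `column_name` above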
+
+
+class DatasetGenerator:
+ """dataset generator"""
+
+ def __init__(self, train_data_dir, raw_feature_dir, names, data_cfg, resolution_data, mixed_precision):
+ self.t1 = time.time()
+ print("start dataset init: ", str(datetime.datetime.now()))
+ self.data_cfg = data_cfg
+ self.num_residues = data_cfg.eval.crop_size
+ self.train_data_dir = train_data_dir
+ self.raw_feature_dir = raw_feature_dir
+ self.names = [name.replace("\n", "") for name in names]
+ self.mixed_precision = mixed_precision
+
+ self.resolution_info = resolution_data
+ print("end dataset init: ", time.time() - self.t1)
+
+ def __getitem__(self, index):
+ prot_name = self.names[index]
+ prot_name_index = np.asarray([index]).astype(np.int32)
+ arrays, prev_pos, prev_msa_first_row, prev_pair, label_arrays = self._get_train_data(prot_name)
+ atomtype_radius = np.array(
+ [1.55, 1.7, 1.7, 1.7, 1.52, 1.7, 1.7, 1.7, 1.52, 1.52, 1.8, 1.7, 1.7, 1.7, 1.55, 1.55,
+ 1.52, 1.52, 1.8, 1.7, 1.7, 1.7, 1.7, 1.55, 1.55, 1.55, 1.52, 1.52, 1.7, 1.55, 1.55,
+ 1.52, 1.7, 1.7, 1.7, 1.55, 1.52])
+ restype_atom14_bond_lower_bound, restype_atom14_bond_upper_bound, _ = \
+ make_atom14_dists_bounds(overlap_tolerance=1.5, bond_length_tolerance_factor=12.0)
+ use_clamped_fape = np.random.binomial(1, 0.9, size=1)
+ filter_by_solution = np.array(1.0)
+ extra_feats = [atomtype_radius, restype_atom14_bond_lower_bound,
+ restype_atom14_bond_upper_bound, use_clamped_fape, filter_by_solution, prot_name_index]
+ dtype = np.float32
+ if self.mixed_precision:
+ dtype = np.float16
+ extra_feats = [array.astype(dtype) for array in extra_feats]
+ all_feats = arrays + [prev_pos, prev_msa_first_row, prev_pair] + label_arrays + extra_feats
+
+ return tuple(all_feats)
+
+ def __len__(self):
+ return len(self.names)
+
+ @staticmethod
+    def _get_train_labels(prot_pdb):
+ """get train labels"""
+ aatype = prot_pdb.aatype
+ seq_len = len(aatype)
+ atom37_positions = prot_pdb.atom_positions.astype(np.float32)
+ atom37_mask = prot_pdb.atom_mask.astype(np.float32)
+
+ # get ground truth of atom14
+ label_features = {'aatype': aatype,
+ 'all_atom_positions': atom37_positions,
+ 'all_atom_mask': atom37_mask}
+
+ atom14_features = make_atom14_positions(aatype, atom37_mask, atom37_positions)
+ atom14_keys = ["atom14_atom_exists", "atom14_gt_exists", "atom14_gt_positions", "residx_atom14_to_atom37",
+ "residx_atom37_to_atom14", "atom37_atom_exists", "atom14_alt_gt_positions",
+ "atom14_alt_gt_exists", "atom14_atom_is_ambiguous"]
+ for index, array in enumerate(atom14_features):
+ label_features[atom14_keys[index]] = array
+
+ # get ground truth of rigid groups
+ rigidgroups_label_feature = atom37_to_frames(aatype, atom37_positions, atom37_mask, is_affine=True)
+ label_features.update(rigidgroups_label_feature)
+
+ # get ground truth of angle
+ angle_label_feature = atom37_to_torsion_angles(aatype.reshape((1, -1)),
+ atom37_positions.reshape((1, seq_len, 37, 3)),
+ atom37_mask.reshape((1, seq_len, 37)), True)
+ label_features.update(angle_label_feature)
+
+ # get pseudo_beta, pseudo_beta_mask
+ pseudo_beta, pseudo_beta_mask = pseudo_beta_fn(aatype, atom37_positions, atom37_mask)
+ label_features["pseudo_beta"] = pseudo_beta
+ label_features["pseudo_beta_mask"] = pseudo_beta_mask
+ label_features["chi_mask"] = label_features.get("torsion_angles_mask")[:, 3:]
+ label_features['torsion_angles_sin_cos'] = label_features.get('torsion_angles_sin_cos')[:, 3:, :]
+ label_features['backbone_affine_mask'] = pseudo_beta_mask
+ label_features.pop("aatype")
+
+ return label_features
+
+ def _get_solution_flag(self, prot_name):
+ """get resolution data"""
+ prot_new_name = prot_name.rsplit('_', 1)[0]
+ if prot_new_name not in self.resolution_info:
+ return np.array(1.0).astype(np.float32)
+ resolution = float(self.resolution_info[prot_new_name]['resolution'])
+ nmr = self.resolution_info[prot_new_name]['method']
+ if resolution < 3 and nmr != 'NMR':
+ return np.array(1.0).astype(np.float32)
+ return np.array(0.0).astype(np.float32)
+
+ def _get_train_data(self, prot_name):
+ """get train data"""
+ pdb_path = os.path.join(self.train_data_dir, prot_name + '.pdb')
+ with open(pdb_path, 'r') as f:
+ prot_pdb = from_pdb_string(f.read())
+ f.close()
+ with open(os.path.join(self.raw_feature_dir, prot_name + '.pkl'), "rb") as f:
+ raw_feature = pickle.load(f)
+ f.close()
+ label_features = self._get_train_labels(prot_pdb)
+ seed = global_seed()
+ raw_feature.update(label_features)
+ processed_feature = Feature(self.data_cfg, raw_feature, is_training=True)
+ processed_feat = processed_feature.pipeline(self.data_cfg, self.mixed_precision, seed=seed)
+ return processed_feat
+
+
+class SeedMaker:
+ """Return unique seeds."""
+
+ def __init__(self, initial_seed=0):
+ self.next_seed = initial_seed
+
+ def __call__(self):
+ i = self.next_seed
+ self.next_seed += 1
+ return i
+
+
+global_seed = SeedMaker()
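+# Successive calls to global_seed() return 0, 1, 2, ..., giving every sample
+# processed by DatasetGenerator a distinct, reproducible preprocessing seed.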
+
+
+def process_pdb(true_aatype, ori_res_length, decoy_pdb_path):
+ """get atom information from pdb"""
+ with open(decoy_pdb_path, 'r') as f:
+ decoy_prot_pdb = from_pdb_string(f.read())
+ f.close()
+ decoy_aatype = decoy_prot_pdb.aatype
+ decoy_atom37_positions = decoy_prot_pdb.atom_positions.astype(np.float32)
+ decoy_atom37_mask = decoy_prot_pdb.atom_mask.astype(np.float32)
+ padding_val = true_aatype.shape[0] - ori_res_length
+ true_aatype = true_aatype[:ori_res_length]
+ decoy_aatype, decoy_atom37_positions, decoy_atom37_mask, align_mask = \
+ align_with_aatype(true_aatype, decoy_aatype, decoy_atom37_positions, decoy_atom37_mask)
+ decoy_atom37_positions = np.pad(decoy_atom37_positions, ((0, padding_val), (0, 0), (0, 0)))
+ decoy_atom37_mask = np.pad(decoy_atom37_mask, ((0, padding_val), (0, 0)))
+ align_mask = np.pad(align_mask, ((0, padding_val)))
+
+ return decoy_atom37_positions, decoy_atom37_mask, align_mask
+
+
+def align_with_aatype(true_aatype, aatype, atom37_positions, atom37_mask):
+ """align pdb with aatype"""
+ if len(true_aatype) == len(aatype):
+ out = aatype, atom37_positions, atom37_mask, np.ones((aatype.shape[0])).astype(np.float32)
+ return out
+ seq1 = [order_restype_with_x.get(x) for x in aatype]
+ seq2 = [order_restype_with_x.get(x) for x in true_aatype]
+ seq1 = ''.join(seq1)
+ seq2 = ''.join(seq2)
+ _, align_relationship, _ = get_aligned_seq(seq1, seq2)
+ pdb_index = 0
+ seq_len = len(true_aatype)
+ new_aatype = np.zeros((seq_len,)).astype(np.int32)
+ new_atom37_positions = np.zeros((seq_len, 37, 3)).astype(np.float32)
+ new_atom37_mask = np.zeros((seq_len, 37)).astype(np.float32)
+ align_mask = np.zeros((seq_len,)).astype(np.float32)
+ for i in range(len(true_aatype)):
+ if align_relationship[i] == "-":
+ new_aatype[i] = 20
+ new_atom37_positions[i] = np.zeros((37, 3)).astype(np.float32)
+ new_atom37_mask[i] = np.zeros((37,)).astype(np.float32)
+ align_mask[i] = 0
+ else:
+ new_aatype[i] = aatype[pdb_index]
+ new_atom37_positions[i] = atom37_positions[pdb_index]
+ new_atom37_mask[i] = atom37_mask[pdb_index]
+ align_mask[i] = 1
+ pdb_index += 1
+ out = new_aatype, new_atom37_positions, new_atom37_mask, align_mask
+ return out
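+
+
+# Note on the alignment above: residues present in the true sequence but missing
+# from the decoy PDB are filled with aatype 20 (unknown), zero coordinates and a
+# zero atom mask, and their align_mask entry is 0 so they can be masked out later.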
diff --git a/MindSPONGE/applications/research/FAAST/data/hhsearch.py b/MindSPONGE/applications/research/FAAST/data/hhsearch.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c1eb40fa0a7d813a0ebead07c33f4687e403ae8
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/data/hhsearch.py
@@ -0,0 +1,84 @@
+# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+HHsearch tools.
+"""
+
+import glob
+import os
+import stat
+import subprocess
+
+from absl import logging
+from data.utils import tmpdir_manager, timing
+
+
+class HHSearch:
+ """Python wrapper of the HHsearch binary.
+ Cited from https://github.com/deepmind/alphafold.
+ """
+
+ def __init__(self,
+ binary_path,
+ databases,
+ maxseq=1_000_000):
+ """Initializes the Python HHsearch wrapper.
+
+ Args:
+ binary_path: The path to the HHsearch executable.
+ databases: A sequence of HHsearch database paths. This should be the
+ common prefix for the database files (i.e. up to but not including
+ _hhm.ffindex etc.)
+ maxseq: The maximum number of rows in an input alignment. Note that this
+ parameter is only supported in HHBlits version 3.1 and higher.
+
+ Raises:
+ RuntimeError: If HHsearch binary not found within the path.
+ """
+ self.binary_path = binary_path
+ self.databases = databases
+ self.maxseq = maxseq
+
+ for database_path in self.databases:
+ if not glob.glob(database_path + '_*'):
+ raise ValueError(f'Could not find HHsearch database {database_path}')
+
+ def query(self, a3m):
+ """Queries the database using HHsearch using a given a3m."""
+ with tmpdir_manager(base_dir='/tmp') as query_tmp_dir:
+ input_path = os.path.join(query_tmp_dir, 'query.a3m')
+ hhr_path = os.path.join(query_tmp_dir, 'output.hhr')
+ with os.fdopen(os.open(input_path, os.O_RDWR | os.O_CREAT, stat.S_IRWXU), 'w') as f:
+ f.write(a3m)
+
+ db_cmd = []
+ for db_path in self.databases:
+ db_cmd.append('-d')
+ db_cmd.append(db_path)
+ cmd = [self.binary_path, '-i', input_path, '-o', hhr_path, '-maxseq', str(self.maxseq), '-cpu',
+ '8',] + db_cmd
+
+ logging.info('Launching subprocess "%s"', ' '.join(cmd))
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ with timing('HHsearch query'):
+ stdout, stderr = process.communicate()
+ retcode = process.wait()
+ if retcode:
+ # Stderr is truncated to prevent proto size errors in Beam.
+ raise RuntimeError('HHSearch failed:\nstdout:\n%s\n\nstderr:\n%s\n' % (
+ stdout.decode('utf-8'), stderr[:100_000].decode('utf-8')))
+ with open(hhr_path) as f:
+ hhr = f.read()
+ return hhr
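+
+
+# Illustrative usage only; the database prefix below is hypothetical, while the
+# binary path mirrors `hhsearch_binary_path` in the database_search config:
+#   searcher = HHSearch(binary_path='./hh-suite/build/bin/hhsearch',
+#                       databases=['./pdb70/pdb70'])
+#   hhr_string = searcher.query(a3m_string)   # a3m_string from an MSA search
+#   hits = parse_hhr(hhr_string)              # see data/parsers.py in this patch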
diff --git a/MindSPONGE/applications/research/FAAST/data/kalign.py b/MindSPONGE/applications/research/FAAST/data/kalign.py
new file mode 100644
index 0000000000000000000000000000000000000000..465216c5aba8464c9236be58f30fa865ee405ef9
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/data/kalign.py
@@ -0,0 +1,96 @@
+# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+Kalign tools.
+"""
+
+import os
+import stat
+import subprocess
+
+from absl import logging
+from data.utils import tmpdir_manager, timing
+
+
+class Kalign:
+ """Python wrapper of the Kalign binary."""
+
+ def __init__(self, *, binary_path: str):
+ """Initializes the Python Kalign wrapper.
+ Cited from "https://github.com/deepmind/alphafold"
+ Args:
+ binary_path: The path to the Kalign binary.
+ """
+ self.binary_path = binary_path
+
+ @staticmethod
+ def to_a3m(sequences):
+ """Converts sequences to an a3m file."""
+ names = ['sequence %d' % i for i in range(1, len(sequences) + 1)]
+ a3m = []
+ for sequence, name in zip(sequences, names):
+ a3m.append(u'>' + name + u'\n')
+ a3m.append(sequence + u'\n')
+ return ''.join(a3m)
+
+
+ def align(self, sequences):
+ """Aligns the sequences and returns the alignment in A3M string.
+
+ Args:
+ sequences: A list of query sequence strings. The sequences have to be at
+ least 6 residues long (Kalign requires this). Note that the order in
+ which you give the sequences might alter the output slightly as
+ different alignment tree might get constructed.
+
+ Returns:
+ A string with the alignment in a3m format.
+
+ Raises:
+ RuntimeError: If Kalign fails.
+ ValueError: If any of the sequences is less than 6 residues long.
+ """
+ logging.info('Aligning %d sequences', len(sequences))
+
+ for s in sequences:
+ if len(s) < 6:
+ raise ValueError('Kalign requires all sequences to be at least 6 '
+ 'residues long. Got %s (%d residues).' % (s, len(s)))
+
+ with tmpdir_manager(base_dir='/tmp') as query_tmp_dir:
+ input_fasta_path = os.path.join(query_tmp_dir, 'input.fasta')
+ output_a3m_path = os.path.join(query_tmp_dir, 'output.a3m')
+
+ with os.fdopen(os.open(input_fasta_path, os.O_RDWR|os.O_CREAT, stat.S_IRWXU), 'w') as f:
+ f.write(self.to_a3m(sequences))
+
+ cmd = [self.binary_path, '-i', input_fasta_path, '-o', output_a3m_path, '-format', 'fasta',]
+
+ logging.info('Launching subprocess "%s"', ' '.join(cmd))
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ with timing('Kalign query'):
+ stdout, stderr = process.communicate()
+ retcode = process.wait()
+ logging.info('Kalign stdout:\n%s\n\nstderr:\n%s\n', stdout.decode('utf-8'), stderr.decode('utf-8'))
+
+ if retcode:
+ raise RuntimeError(
+ 'Kalign failed\nstdout:\n%s\n\nstderr:\n%s\n' % (stdout.decode('utf-8'), stderr.decode('utf-8')))
+
+ with open(output_a3m_path) as f:
+ a3m = f.read()
+
+ return a3m
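+
+
+# Illustrative usage only; the binary path mirrors `kalign_binary_path` in the
+# database_search config, and the sequences are hypothetical (>= 6 residues):
+#   aligner = Kalign(binary_path='./bin/kalign')
+#   a3m_string = aligner.align(['MKTAYIAKQR', 'MKTAYIGKQR'])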
diff --git a/MindSPONGE/applications/research/FAAST/data/msa_query.py b/MindSPONGE/applications/research/FAAST/data/msa_query.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f0b27ae00dcd39775d73016c33181e55e4918f3
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/data/msa_query.py
@@ -0,0 +1,71 @@
+# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+MSA query tools.
+"""
+
+import os
+
+
+class MmseqQuery:
+ """Runs the alignment tools"""
+
+ def __init__(self,
+ database_envdb_dir,
+ mmseqs_binary,
+ uniref30_path,
+ result_path,
+ msa_search_sh=os.path.join(os.path.dirname(__file__),
+ "msa_search.sh")):
+ """Search the a3m info for a given FASTA file."""
+
+ self.database_envdb_dir = database_envdb_dir
+ self.mmseqs_binary = mmseqs_binary
+ self.uniref30_path = uniref30_path
+ self.result_path = result_path
+ self.msa_search_sh = msa_search_sh
+
+ @staticmethod
+ def get_a3mlines(a3m_paths):
+ """combine a3m files together"""
+ a3m_lines = {}
+ for a3m_file in a3m_paths:
+ update_m, m = True, None
+ with open(a3m_file, "r") as f:
+ lines = f.readlines()
+ for line in lines:
+ if "\x00" in line:
+ line = line.replace("\x00", "")
+ update_m = True
+ if line.startswith(">") and update_m:
+ try:
+ m = int(line.strip()[-1])
+ except ValueError:
+ m = str(line.strip()[-1])
+ update_m = False
+ if m not in a3m_lines:
+ a3m_lines[m] = []
+ a3m_lines.get(m).append(line)
+ a3m_lines = ["".join(a3m_lines.get(key)) for key in a3m_lines]
+ return a3m_lines[0]
+
+ def aligned_a3m_files(self, result_path):
+ """Runs alignment tools on the input sequence and creates features."""
+
+ a3m_file_paths = os.listdir(result_path)
+ a3m_file_paths = [os.path.join(result_path, x) for x in a3m_file_paths if x.endswith("a3m")]
+ a3m_lines = self.get_a3mlines(a3m_paths=a3m_file_paths)
+
+ return a3m_lines
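+
+
+# Illustrative usage only; binary and database paths are hypothetical, while
+# result_path mirrors `a3m_result_path` in the database_search config:
+#   query = MmseqQuery(database_envdb_dir=None, mmseqs_binary='./bin/mmseqs',
+#                      uniref30_path='./uniref30', result_path='./a3m_result/')
+#   a3m_lines = query.aligned_a3m_files('./a3m_result/')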
diff --git a/MindSPONGE/applications/research/FAAST/data/parsers.py b/MindSPONGE/applications/research/FAAST/data/parsers.py
new file mode 100644
index 0000000000000000000000000000000000000000..adb026b105103ca38f69023060cdd3a6e79d6aa0
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/data/parsers.py
@@ -0,0 +1,621 @@
+# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+Parsers for mmCIF, FASTA, HHR and A3M formats.
+"""
+import re
+import string
+import collections
+import io
+import dataclasses
+from typing import Any, Mapping, Optional, Sequence, Tuple, List
+from absl import logging
+from Bio import PDB
+from Bio.Data import SCOPData
+
+
+
+@dataclasses.dataclass(frozen=True)
+class HhrHit:
+ """Class representing a hit in an hhr file."""
+ index: int
+ name: str
+ prob_true: float
+ e_value: float
+ score: float
+ aligned_cols: int
+ identity: float
+ similarity: float
+ sum_probs: float
+ neff: float
+ query: str
+ hit_sequence: str
+ hit_dssp: str
+ column_score_code: str
+ confidence_scores: str
+ indices_query: List[int]
+ indices_hit: List[int]
+
+
+# Type aliases:
+ChainId = str
+PdbHeader = Mapping[str, Any]
+PDBSTRUCTURE = PDB.Structure.Structure
+SeqRes = str
+MmCIFDict = Mapping[str, Sequence[str]]
+
+
+@dataclasses.dataclass(frozen=True)
+class Monomer:
+ id: str
+ num: int
+
+
+# Note - mmCIF format provides no guarantees on the type of author-assigned
+# sequence numbers. They need not be integers.
+@dataclasses.dataclass(frozen=True)
+class AtomSite:
+ residue_name: str
+ author_chain_id: str
+ mmcif_chain_id: str
+ author_seq_num: str
+ mmcif_seq_num: int
+ insertion_code: str
+ hetatm_atom: str
+ model_num: int
+
+
+# Used to map SEQRES index to a residue in the structure.
+@dataclasses.dataclass(frozen=True)
+class ResiduePosition:
+ chain_id: str
+ residue_number: int
+ insertion_code: str
+
+
+@dataclasses.dataclass(frozen=True)
+class ResidueAtPosition:
+ position: Optional[ResiduePosition]
+ name: str
+ is_missing: bool
+ hetflag: str
+
+
+@dataclasses.dataclass(frozen=True)
+class MmcifObject:
+ """Representation of a parsed mmCIF file.
+
+ Contains:
+ file_id: A meaningful name, e.g. a pdb_id. Should be unique amongst all
+ files being processed.
+ header: Biopython header.
+ structure: Biopython structure.
+ chain_to_seqres: Dict mapping chain_id to 1 letter amino acid sequence. E.g.
+ {'A': 'ABCDEFG'}
+ seqres_to_structure: Dict; for each chain_id contains a mapping between
+ SEQRES index and a ResidueAtPosition. e.g. {'A': {0: ResidueAtPosition,
+ 1: ResidueAtPosition,
+ ...}}
+ raw_string: The raw string used to construct the MmcifObject.
+ """
+ file_id: str
+ header: PdbHeader
+ structure: PDBSTRUCTURE
+ chain_to_seqres: Mapping[ChainId, SeqRes]
+ seqres_to_structure: Mapping[ChainId, Mapping[int, ResidueAtPosition]]
+ raw_string: Any
+
+
+@dataclasses.dataclass(frozen=True)
+class ParsingResult:
+ """Returned by the parse function.
+
+ Contains:
+ mmcif_object: A MmcifObject, may be None if no chain could be successfully
+ parsed.
+ errors: A dict mapping (file_id, chain_id) to any exception generated.
+ """
+ mmcif_object: Optional[MmcifObject]
+ errors: Mapping[Tuple[str, str], Any]
+
+
+def _update_hhr_residue_indices_list(
+ sequence, start_index, indices_list):
+ """Computes the relative indices for each residue with respect to the original sequence."""
+ counter = start_index
+ for symbol in sequence:
+ if symbol == '-':
+ indices_list.append(-1)
+ else:
+ indices_list.append(counter)
+ counter += 1
+
+
+def _get_hhr_line_regex_groups(
+ regex_pattern: str, line: str):
+ match = re.match(regex_pattern, line)
+ if match is None:
+ raise RuntimeError(f'Could not parse query line {line}')
+ return match.groups()
+
+
+def parse_fasta(fasta_string: str):
+ """Parses FASTA string and returns list of strings with amino-acid sequences.
+
+ Arguments:
+ fasta_string: The string contents of a FASTA file.
+
+ Returns:
+ A tuple of two lists:
+ * A list of sequences.
+ * A list of sequence descriptions taken from the comment lines. In the
+ same order as the sequences.
+ """
+ sequences = []
+ descriptions = []
+ index = -1
+ for line in fasta_string.splitlines():
+ line = line.strip()
+ if line.startswith('>'):
+ index += 1
+ descriptions.append(line[1:]) # Remove the '>' at the beginning.
+ sequences.append('')
+ continue
+ elif not line:
+ continue # Skip blank lines.
+ sequences[index] += line
+
+ return sequences, descriptions
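+
+
+# For example, parse_fasta('>seq1\nMKT\nAYI\n>seq2\nMRT\n') returns
+# (['MKTAYI', 'MRT'], ['seq1', 'seq2']).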
+
+
+def _parse_hhr_hit(detailed_lines):
+ """Parses the detailed HMM HMM comparison section for a single Hit.
+
+ This works on .hhr files generated from both HHBlits and HHSearch.
+
+ Args:
+ detailed_lines: A list of lines from a single comparison section between 2
+ sequences (which each have their own HMM's)
+
+ Returns:
+ A dictionary with the information from that detailed comparison section
+
+ Raises:
+ RuntimeError: If a certain line cannot be processed
+ """
+ # Parse first 2 lines.
+ number_of_hit = int(detailed_lines[0].split()[-1])
+ name_hit = detailed_lines[1][1:]
+
+ # Parse the summary line.
+ pattern = (
+ 'Probab=(.*)[\t ]*E-value=(.*)[\t ]*Score=(.*)[\t ]*Aligned_cols=(.*)[\t'
+ ' ]*Identities=(.*)%[\t ]*Similarity=(.*)[\t ]*Sum_probs=(.*)[\t '
+ ']*Template_Neff=(.*)')
+ match = re.match(pattern, detailed_lines[2])
+ if match is None:
+ raise RuntimeError(
+ 'Could not parse section: %s. Expected this: \n%s to contain summary.' %
+ (detailed_lines, detailed_lines[2]))
+ (prob_true, e_value, score, aligned_cols, identity, similarity, sum_probs,
+ neff) = [float(x) for x in match.groups()]
+
+ # The next section reads the detailed comparisons. These are in a 'human
+ # readable' format which has a fixed length. The strategy employed is to
+ # assume that each block starts with the query sequence line, and to parse
+ # that with a regexp in order to deduce the fixed length used for that
+ # block.
+ query = ''
+ hit_sequence = ''
+ hit_dssp = ''
+ column_score_code = ''
+ confidence_scores = ''
+ indices_query = []
+ indices_hit = []
+ length_block = None
+
+ for line in detailed_lines[3:]:
+ # Parse the query sequence line
+ if (line.startswith('Q ') and not line.startswith('Q ss_dssp') and not line.startswith('Q ss_pred') \
+ and not line.startswith('Q Consensus')):
+ # Thus the first 17 characters must be 'Q ', and we can parse
+ # everything after that.
+ # start sequence end total_sequence_length
+ patt = r'[\t ]*([0-9]*) ([A-Z-]*)[\t ]*([0-9]*) \([0-9]*\)'
+ groups = _get_hhr_line_regex_groups(patt, line[17:])
+
+ # Get the length of the parsed block using the start and finish indices,
+ # and ensure it is the same as the actual block length.
+ start = int(groups[0]) - 1 # Make index zero based.
+ delta_query = groups[1]
+ end = int(groups[2])
+ num_insertions = len([x for x in delta_query if x == '-'])
+ length_block = end - start + num_insertions
+ assert length_block == len(delta_query)
+
+ # Update the query sequence and indices list.
+ query += delta_query
+ _update_hhr_residue_indices_list(delta_query, start, indices_query)
+
+ elif line.startswith('T '):
+ # Parse the hit dssp line.
+ if line.startswith('T ss_dssp'):
+ # T ss_dssp hit_dssp
+ patt = r'T ss_dssp[\t ]*([A-Z-]*)'
+ groups = _get_hhr_line_regex_groups(patt, line)
+ assert len(groups[0]) == length_block
+ hit_dssp += groups[0]
+
+ # Parse the hit sequence.
+ elif (not line.startswith('T ss_pred') and
+ not line.startswith('T Consensus')):
+ # Thus the first 17 characters must be 'T ', and we can
+ # parse everything after that.
+ # start sequence end total_sequence_length
+ patt = r'[\t ]*([0-9]*) ([A-Z-]*)[\t ]*[0-9]* \([0-9]*\)'
+ groups = _get_hhr_line_regex_groups(patt, line[17:])
+ start = int(groups[0]) - 1 # Make index zero based.
+ delta_hit_sequence = groups[1]
+ assert length_block == len(delta_hit_sequence)
+
+ # Update the hit sequence and indices list.
+ hit_sequence += delta_hit_sequence
+ _update_hhr_residue_indices_list(
+ delta_hit_sequence, start, indices_hit)
+
+ # Parse the column score line.
+ elif line.startswith(' ' * 22):
+ assert length_block
+ column_score_code += line[22:length_block + 22]
+
+ # Update confidence score.
+ elif line.startswith('Confidence'):
+ assert length_block
+ confidence_scores += line[22:length_block + 22]
+
+ return HhrHit(
+ index=number_of_hit,
+ name=name_hit,
+ prob_true=prob_true,
+ e_value=e_value,
+ score=score,
+ aligned_cols=int(aligned_cols),
+ identity=identity,
+ similarity=similarity,
+ sum_probs=sum_probs,
+ neff=neff,
+ query=query,
+ hit_sequence=hit_sequence,
+ hit_dssp=hit_dssp,
+ column_score_code=column_score_code,
+ confidence_scores=confidence_scores,
+ indices_query=indices_query,
+ indices_hit=indices_hit,
+ )
+
+
+def parse_hhr(hhr_string: str):
+ """Parses the content of an entire HHR file."""
+ lines = hhr_string.splitlines()
+
+ # Each .hhr file starts with a results table, then has a sequence of hit
+ # "paragraphs", each paragraph starting with a line 'No '. We
+ # iterate through each paragraph to parse each hit.
+
+ block_starts = [i for i, line in enumerate(lines) if line.startswith('No ')]
+
+ hits = []
+ if block_starts:
+ block_starts.append(len(lines)) # Add the end of the final block.
+ for i in range(len(block_starts) - 1):
+ hits.append(_parse_hhr_hit(lines[block_starts[i]:block_starts[i + 1]]))
+ return hits
+
+
+def parse_a3m(a3m_string: str):
+ """Parses sequences and deletion matrix from a3m format alignment.
+
+ Args:
+ a3m_string: The string contents of a a3m file. The first sequence in the
+ file should be the query sequence.
+
+ Returns:
+ A tuple of:
+ * A list of sequences that have been aligned to the query. These
+ might contain duplicates.
+ * The deletion matrix for the alignment as a list of lists. The element
+ at `deletion_matrix[i][j]` is the number of residues deleted from
+ the aligned sequence i at residue position j.
+ """
+ sequences, _ = parse_fasta(a3m_string)
+ deletion_matrix = []
+ for msa_sequence in sequences:
+ deletion_vec = []
+ deletion_count = 0
+ for j in msa_sequence:
+ if j.islower():
+ deletion_count += 1
+ else:
+ deletion_vec.append(deletion_count)
+ deletion_count = 0
+ deletion_matrix.append(deletion_vec)
+
+ # Make the MSA matrix out of aligned (deletion-free) sequences.
+ deletion_table = str.maketrans('', '', string.ascii_lowercase)
+ aligned_sequences = [s.translate(deletion_table) for s in sequences]
+ return aligned_sequences, deletion_matrix
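+
+
+# For example, parse_a3m('>query\nMKTA\n>hit\nM-kTA\n') returns the aligned
+# sequences ['MKTA', 'M-TA'] and the deletion matrix [[0, 0, 0, 0], [0, 0, 1, 0]]
+# (the lowercase 'k' counts as one deletion before the following 'T').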
+
+
+def mmcif_loop_to_list(prefix, parsed_info):
+ """Extracts loop associated with a prefix from mmCIF data as a list.
+
+ Reference for loop_ in mmCIF:
+ http://mmcif.wwpdb.org/docs/tutorials/mechanics/pdbx-mmcif-syntax.html
+
+ Args:
+ prefix: Prefix shared by each of the data items in the loop.
+ e.g. '_entity_poly_seq.', where the data items are _entity_poly_seq.num,
+ _entity_poly_seq.mon_id. Should include the trailing period.
+ parsed_info: A dict of parsed mmCIF data, e.g. _mmcif_dict from a Biopython
+ parser.
+
+ Returns:
+ Returns a list of dicts; each dict represents 1 entry from an mmCIF loop.
+ """
+ cols = []
+ data = []
+ for key, value in parsed_info.items():
+ if key.startswith(prefix):
+ cols.append(key)
+ data.append(value)
+
+ assert all([len(xs) == len(data[0]) for xs in data]), ('mmCIF error: Not all loops are the same length: %s' % cols)
+
+ return [dict(zip(cols, xs)) for xs in zip(*data)]
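+
+
+# For example, with parsed_info containing
+#   {'_entity_poly_seq.num': ['1', '2'], '_entity_poly_seq.mon_id': ['MET', 'LYS']}
+# mmcif_loop_to_list('_entity_poly_seq.', parsed_info) returns
+#   [{'_entity_poly_seq.num': '1', '_entity_poly_seq.mon_id': 'MET'},
+#    {'_entity_poly_seq.num': '2', '_entity_poly_seq.mon_id': 'LYS'}].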
+
+
+def mmcif_loop_to_dict(prefix, index, parsed_info):
+ """Extracts loop associated with a prefix from mmCIF data as a dictionary.
+
+ Args:
+ prefix: Prefix shared by each of the data items in the loop.
+ e.g. '_entity_poly_seq.', where the data items are _entity_poly_seq.num,
+ _entity_poly_seq.mon_id. Should include the trailing period.
+ index: Which item of loop data should serve as the key.
+ parsed_info: A dict of parsed mmCIF data, e.g. _mmcif_dict from a Biopython
+ parser.
+
+ Returns:
+ Returns a dict of dicts; each dict represents 1 entry from an mmCIF loop,
+ indexed by the index column.
+ """
+ entries = mmcif_loop_to_list(prefix, parsed_info)
+ return {entry[index]: entry for entry in entries}
+
+
+def parse_mmcif(*,
+ file_id: str,
+ mmcif_string: str,
+ catch_all_errors: bool = True):
+ """Entry point, parses an mmcif_string.
+
+ Args:
+ file_id: A string identifier for this file. Should be unique within the
+ collection of files being processed.
+ mmcif_string: Contents of an mmCIF file.
+ catch_all_errors: If True, all exceptions are caught and error messages are
+ returned as part of the ParsingResult. If False exceptions will be allowed
+ to propagate.
+
+ Returns:
+ A ParsingResult.
+ """
+ errors = {}
+ try:
+ parser = PDB.MMCIFParser(QUIET=True)
+ handle = io.StringIO(mmcif_string)
+ full_structure = parser.get_structure('', handle)
+ first_model_structure = _get_first_model(full_structure)
+ # Extract the _mmcif_dict from the parser, which contains useful fields not
+ # reflected in the Biopython structure.
+ parsed_info = parser._mmcif_dict # pylint:disable=protected-access
+
+ # Ensure all values are lists, even if singletons.
+ for key, value in parsed_info.items():
+ if not isinstance(value, list):
+ parsed_info[key] = [value]
+
+ header = _get_header(parsed_info)
+
+ # Determine the protein chains, and their start numbers according to the
+ # internal mmCIF numbering scheme (likely but not guaranteed to be 1).
+ valid_chains = _get_protein_chains(parsed_info=parsed_info)
+ if not valid_chains:
+ return ParsingResult(None, {(file_id, ''): 'No protein chains found in this file.'})
+ seq_start_num = {chain_id: min([monomer.num for monomer in seq]) for chain_id, seq in valid_chains.items()}
+
+ # Loop over the atoms for which we have coordinates. Populate two mappings:
+ # -mmcif_to_author_chain_id (maps internal mmCIF chain ids to chain ids used
+ # the authors / Biopython).
+ # -seq_to_structure_mappings (maps idx into sequence to ResidueAtPosition).
+ mmcif_to_author_chain_id = {}
+ seq_to_structure_mappings = {}
+ for atom in _get_atom_site_list(parsed_info):
+ if atom.model_num != '1':
+ # We only process the first model at the moment.
+ continue
+
+ mmcif_to_author_chain_id[atom.mmcif_chain_id] = atom.author_chain_id
+
+ if atom.mmcif_chain_id in valid_chains:
+ hetflag = ' '
+ if atom.hetatm_atom == 'HETATM':
+ # Water atoms are assigned a special hetflag of W in Biopython. We
+ # need to do the same, so that this hetflag can be used to fetch
+ # a residue from the Biopython structure by id.
+ if atom.residue_name in ('HOH', 'WAT'):
+ hetflag = 'W'
+ else:
+ hetflag = 'H_' + atom.residue_name
+ insertion_code = atom.insertion_code
+ if not _is_set(atom.insertion_code):
+ insertion_code = ' '
+ position = ResiduePosition(chain_id=atom.author_chain_id, residue_number=int(
+ atom.author_seq_num), insertion_code=insertion_code)
+ seq_idx = int(atom.mmcif_seq_num) - seq_start_num[atom.mmcif_chain_id]
+ current = seq_to_structure_mappings.get(atom.author_chain_id, {})
+ current[seq_idx] = ResidueAtPosition(position=position,
+ name=atom.residue_name,
+ is_missing=False,
+ hetflag=hetflag)
+ seq_to_structure_mappings[atom.author_chain_id] = current
+
+ # Add missing residue information to seq_to_structure_mappings.
+ for chain_id, seq_info in valid_chains.items():
+ author_chain = mmcif_to_author_chain_id.get(chain_id)
+ current_mapping = seq_to_structure_mappings.get(author_chain)
+ for idx, monomer in enumerate(seq_info):
+ if idx not in current_mapping:
+ current_mapping[idx] = ResidueAtPosition(position=None,
+ name=monomer.id,
+ is_missing=True,
+ hetflag=' ')
+
+ author_chain_to_sequence = {}
+ for chain_id, seq_info in valid_chains.items():
+ author_chain = mmcif_to_author_chain_id.get(chain_id)
+ seq = []
+ for monomer in seq_info:
+ code = SCOPData.protein_letters_3to1.get(monomer.id, 'X')
+ seq.append(code if len(code) == 1 else 'X')
+ seq = ''.join(seq)
+ author_chain_to_sequence[author_chain] = seq
+
+ mmcif_object = MmcifObject(
+ file_id=file_id,
+ header=header,
+ structure=first_model_structure,
+ chain_to_seqres=author_chain_to_sequence,
+ seqres_to_structure=seq_to_structure_mappings,
+ raw_string=parsed_info)
+
+ return ParsingResult(mmcif_object=mmcif_object, errors=errors)
+ except Exception as e: # pylint:disable=broad-except
+ errors[(file_id, '')] = e
+ if not catch_all_errors:
+ raise
+ return ParsingResult(mmcif_object=None, errors=errors)
+
+
+def _get_first_model(structure: PDBSTRUCTURE) -> PDBSTRUCTURE:
+ """Returns the first model in a Biopython structure."""
+ return next(structure.get_models())
+
+
+_MIN_LENGTH_OF_CHAIN_TO_BE_COUNTED_AS_PEPTIDE = 21
+
+
+def get_release_date(parsed_info: MmCIFDict) -> str:
+ """Returns the oldest revision date."""
+ revision_dates = parsed_info['_pdbx_audit_revision_history.revision_date']
+ return min(revision_dates)
+
+
+def _get_header(parsed_info: MmCIFDict) -> PdbHeader:
+ """Returns a basic header containing method, release date and resolution."""
+ header = {}
+
+ experiments = mmcif_loop_to_list('_exptl.', parsed_info)
+ header['structure_method'] = ','.join([experiment['_exptl.method'].lower() for experiment in experiments])
+
+ # Note: The release_date here corresponds to the oldest revision. We prefer to
+ # use this for dataset filtering over the deposition_date.
+ if '_pdbx_audit_revision_history.revision_date' in parsed_info:
+ header['release_date'] = get_release_date(parsed_info)
+ else:
+ logging.warning('Could not determine release_date: %s', parsed_info['_entry.id'])
+
+ header['resolution'] = 0.00
+ for res_key in ('_refine.ls_d_res_high', '_em_3d_reconstruction.resolution', '_reflns.d_resolution_high'):
+ if res_key in parsed_info:
+ try:
+ raw_resolution = parsed_info[res_key][0]
+ header['resolution'] = float(raw_resolution)
+ except ValueError:
+ logging.warning('Invalid resolution format: %s', parsed_info[res_key])
+
+ return header
+
+
+def _get_atom_site_list(parsed_info: MmCIFDict) -> Sequence[AtomSite]:
+ """Returns list of atom sites; contains data not present in the structure."""
+ return [AtomSite(*site) for site in zip( # pylint:disable=g-complex-comprehension
+ parsed_info['_atom_site.label_comp_id'],
+ parsed_info['_atom_site.auth_asym_id'],
+ parsed_info['_atom_site.label_asym_id'],
+ parsed_info['_atom_site.auth_seq_id'],
+ parsed_info['_atom_site.label_seq_id'],
+ parsed_info['_atom_site.pdbx_PDB_ins_code'],
+ parsed_info['_atom_site.group_PDB'],
+ parsed_info['_atom_site.pdbx_PDB_model_num'],
+ )]
+
+
+def _get_protein_chains(*, parsed_info: Mapping[str, Any]) -> Mapping[ChainId, Sequence[Monomer]]:
+ """Extracts polymer information for protein chains only.
+
+ Args:
+ parsed_info: _mmcif_dict produced by the Biopython parser.
+
+ Returns:
+ A dict mapping mmcif chain id to a list of Monomers.
+ """
+ # Get polymer information for each entity in the structure.
+ entity_poly_seqs = mmcif_loop_to_list('_entity_poly_seq.', parsed_info)
+
+ polymers = collections.defaultdict(list)
+ for entity_poly_seq in entity_poly_seqs:
+ polymers[entity_poly_seq['_entity_poly_seq.entity_id']].append(
+ Monomer(id=entity_poly_seq['_entity_poly_seq.mon_id'], num=int(entity_poly_seq['_entity_poly_seq.num'])))
+
+ # Get chemical compositions. Will allow us to identify which of these polymers
+ # are proteins.
+ chem_comps = mmcif_loop_to_dict('_chem_comp.', '_chem_comp.id', parsed_info)
+
+ # Get chains information for each entity. Necessary so that we can return a
+ # dict keyed on chain id rather than entity.
+ struct_asyms = mmcif_loop_to_list('_struct_asym.', parsed_info)
+
+ entity_to_mmcif_chains = collections.defaultdict(list)
+ for struct_asym in struct_asyms:
+ chain_id = struct_asym['_struct_asym.id']
+ entity_id = struct_asym['_struct_asym.entity_id']
+ entity_to_mmcif_chains[entity_id].append(chain_id)
+
+ # Identify and return the valid protein chains.
+ valid_chains = {}
+ for entity_id, seq_info in polymers.items():
+ chain_ids = entity_to_mmcif_chains[entity_id]
+
+ # Reject polymers without any peptide-like components, such as DNA/RNA.
+ if any(['peptide' in chem_comps[monomer.id]['_chem_comp.type'] for monomer in seq_info]):
+ for chain_id in chain_ids:
+ valid_chains[chain_id] = seq_info
+ return valid_chains
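+
+
+# Illustrative sketch only (not called anywhere in FAAST): the shape of the
+# _get_protein_chains output for a hypothetical entry whose single protein
+# entity maps to mmCIF chains 'A' and 'B'. Both chains share one Monomer list.
+def _example_valid_chains():
+    """Returns a toy valid_chains mapping."""
+    seq_info = [Monomer(id='MET', num=1), Monomer(id='GLY', num=2)]
+    return {'A': seq_info, 'B': seq_info}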
+
+
+def _is_set(data: str) -> bool:
+ """Returns False if data is a special mmCIF character indicating 'unset'."""
+ return data not in ('.', '?')
diff --git a/MindSPONGE/applications/research/FAAST/data/preprocess.py b/MindSPONGE/applications/research/FAAST/data/preprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d0291f4fb68b2422842d6acc0b900ed11b6446e
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/data/preprocess.py
@@ -0,0 +1,554 @@
+# Copyright 2022 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""data process"""
+import numpy as np
+
+from mindsponge.data.data_transform import one_hot, correct_msa_restypes, randomly_replace_msa_with_unknown, \
+ fix_templates_aatype, pseudo_beta_fn, make_atom14_masks, \
+ block_delete_msa_indices, sample_msa, make_masked_msa, \
+ nearest_neighbor_clusters, summarize_clusters, crop_extra_msa, \
+ make_msa_feat, random_crop_to_size, generate_random_sample
+from mindsponge.common.residue_constants import atom_type_num
+
+NUM_RES = 'num residues placeholder'
+NUM_MSA_SEQ = 'msa placeholder'
+NUM_EXTRA_SEQ = 'extra msa placeholder'
+NUM_TEMPLATES = 'num templates placeholder'
+NUM_SEQ = "length msa placeholder"
+NUM_NOISE = 'num noise placeholder'
+NUM_LATENT_DIM = "num latent placeholder"
+_MSA_FEATURE_NAMES = ['msa', 'deletion_matrix', 'msa_mask', 'msa_row_mask', 'bert_mask', 'true_msa', 'msa_input']
+
+FEATURES = {
+ # Static features of a protein sequence
+ "aatype": (np.float32, [NUM_RES, 21]),
+ "between_segment_residues": (np.int64, [NUM_RES, 1]),
+ "deletion_matrix": (np.float32, [NUM_SEQ, NUM_RES, 1]),
+ "msa": (np.int64, [NUM_SEQ, NUM_RES, 1]),
+ "num_alignments": (np.int64, [NUM_RES, 1]),
+ "residue_index": (np.int64, [NUM_RES, 1]),
+ "seq_length": (np.int64, [NUM_RES, 1]),
+ "all_atom_positions": (np.float32, [NUM_RES, atom_type_num, 3]),
+ "all_atom_mask": (np.int64, [NUM_RES, atom_type_num]),
+ "resolution": (np.float32, [1]),
+ "template_domain_names": (str, [NUM_TEMPLATES]),
+ "template_sum_probs": (np.float32, [NUM_TEMPLATES, 1]),
+ "template_aatype": (np.float32, [NUM_TEMPLATES, NUM_RES, 22]),
+ "template_all_atom_positions": (np.float32, [NUM_TEMPLATES, NUM_RES, atom_type_num, 3]),
+ "template_all_atom_masks": (np.float32, [NUM_TEMPLATES, NUM_RES, atom_type_num, 1]),
+ "atom14_atom_exists": (np.float32, [NUM_RES, 14]),
+ "atom14_gt_exists": (np.float32, [NUM_RES, 14]),
+ "atom14_gt_positions": (np.float32, [NUM_RES, 14, 3]),
+ "residx_atom14_to_atom37": (np.float32, [NUM_RES, 14]),
+ "residx_atom37_to_atom14": (np.float32, [NUM_RES, 37]),
+ "atom37_atom_exists": (np.float32, [NUM_RES, 37]),
+ "atom14_alt_gt_positions": (np.float32, [NUM_RES, 14, 3]),
+ "atom14_alt_gt_exists": (np.float32, [NUM_RES, 14]),
+ "atom14_atom_is_ambiguous": (np.float32, [NUM_RES, 14]),
+ "rigidgroups_gt_frames": (np.float32, [NUM_RES, 8, 12]),
+ "rigidgroups_gt_exists": (np.float32, [NUM_RES, 8]),
+ "rigidgroups_group_exists": (np.float32, [NUM_RES, 8]),
+ "rigidgroups_group_is_ambiguous": (np.float32, [NUM_RES, 8]),
+ "rigidgroups_alt_gt_frames": (np.float32, [NUM_RES, 8, 12]),
+ "backbone_affine_tensor": (np.float32, [NUM_RES, 7]),
+ "torsion_angles_sin_cos": (np.float32, [NUM_RES, 4, 2]),
+ "torsion_angles_mask": (np.float32, [NUM_RES, 7]),
+ "pseudo_beta": (np.float32, [NUM_RES, 3]),
+ "pseudo_beta_mask": (np.float32, [NUM_RES]),
+ "chi_mask": (np.float32, [NUM_RES, 4]),
+ "backbone_affine_mask": (np.float32, [NUM_RES]),
+}
+
+feature_list = {
+ 'aatype': [NUM_RES],
+ 'all_atom_mask': [NUM_RES, None],
+ 'all_atom_positions': [NUM_RES, None, None],
+ 'alt_chi_angles': [NUM_RES, None],
+ 'atom14_alt_gt_exists': [NUM_RES, None],
+ 'atom14_alt_gt_positions': [NUM_RES, None, None],
+ 'atom14_atom_exists': [NUM_RES, None],
+ 'atom14_atom_is_ambiguous': [NUM_RES, None],
+ 'atom14_gt_exists': [NUM_RES, None],
+ 'atom14_gt_positions': [NUM_RES, None, None],
+ 'atom37_atom_exists': [NUM_RES, None],
+ 'backbone_affine_mask': [NUM_RES],
+ 'backbone_affine_tensor': [NUM_RES, None],
+ 'bert_mask': [NUM_MSA_SEQ, NUM_RES],
+ 'chi_angles': [NUM_RES, None],
+ 'chi_mask': [NUM_RES, None],
+ 'extra_deletion_value': [NUM_EXTRA_SEQ, NUM_RES],
+ 'extra_has_deletion': [NUM_EXTRA_SEQ, NUM_RES],
+ 'extra_msa': [NUM_EXTRA_SEQ, NUM_RES],
+ 'extra_msa_mask': [NUM_EXTRA_SEQ, NUM_RES],
+ 'extra_msa_row_mask': [NUM_EXTRA_SEQ],
+ 'is_distillation': [],
+ 'msa_feat': [NUM_MSA_SEQ, NUM_RES, None],
+ 'msa_mask': [NUM_MSA_SEQ, NUM_RES],
+ 'msa_row_mask': [NUM_MSA_SEQ],
+ 'pseudo_beta': [NUM_RES, None],
+ 'pseudo_beta_mask': [NUM_RES],
+ 'random_crop_to_size_seed': [None],
+ 'residue_index': [NUM_RES],
+ 'residx_atom14_to_atom37': [NUM_RES, None],
+ 'residx_atom37_to_atom14': [NUM_RES, None],
+ 'resolution': [],
+ 'rigidgroups_alt_gt_frames': [NUM_RES, None, None],
+ 'rigidgroups_group_exists': [NUM_RES, None],
+ 'rigidgroups_group_is_ambiguous': [NUM_RES, None],
+ 'rigidgroups_gt_exists': [NUM_RES, None],
+ 'rigidgroups_gt_frames': [NUM_RES, None, None],
+ 'seq_length': [],
+ 'seq_mask': [NUM_RES],
+ 'target_feat': [NUM_RES, None],
+ 'template_aatype': [NUM_TEMPLATES, NUM_RES],
+ 'template_all_atom_masks': [NUM_TEMPLATES, NUM_RES, None],
+ 'template_all_atom_positions': [
+ NUM_TEMPLATES, NUM_RES, None, None],
+ 'template_backbone_affine_mask': [NUM_TEMPLATES, NUM_RES],
+ 'template_backbone_affine_tensor': [
+ NUM_TEMPLATES, NUM_RES, None],
+ 'template_mask': [NUM_TEMPLATES],
+ 'template_pseudo_beta': [NUM_TEMPLATES, NUM_RES, None],
+ 'template_pseudo_beta_mask': [NUM_TEMPLATES, NUM_RES],
+ 'template_sum_probs': [NUM_TEMPLATES, None],
+ 'true_msa': [NUM_MSA_SEQ, NUM_RES],
+ 'torsion_angles_sin_cos': [NUM_RES, None, None],
+ 'msa_input': [NUM_MSA_SEQ, NUM_RES, 2],
+ 'query_input': [NUM_RES, 2],
+ 'additional_input': [NUM_RES, 4],
+ 'random_data': [NUM_NOISE, NUM_MSA_SEQ, NUM_RES, NUM_LATENT_DIM],
+ 'context_mask': [NUM_MSA_SEQ, 2]
+}
+
+
+def feature_shape(feature_name, num_residues, msa_length, num_templates, features=None):
+ """Get the shape for the given feature name."""
+ features = features or FEATURES
+ if feature_name.endswith("_unnormalized"):
+ feature_name = feature_name[:-13]
+ unused_dtype, raw_sizes = features.get(feature_name, (None, None))
+ replacements = {NUM_RES: num_residues,
+ NUM_SEQ: msa_length}
+
+ if num_templates is not None:
+ replacements[NUM_TEMPLATES] = num_templates
+
+ sizes = [replacements.get(dimension, dimension) for dimension in raw_sizes]
+ for dimension in sizes:
+ if isinstance(dimension, str):
+ raise ValueError("Could not parse %s (shape: %s) with values: %s" % (
+ feature_name, raw_sizes, replacements))
+ size_r = [int(x) for x in sizes]
+ return size_r
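+
+
+# Illustrative sketch only (not called anywhere in FAAST): resolves the symbolic
+# placeholders in FEATURES for a hypothetical 7-residue protein with a
+# 3-sequence MSA.
+def _example_feature_shape():
+    """Returns concrete shapes for two features."""
+    # 'msa' is declared as [NUM_SEQ, NUM_RES, 1], so this resolves to [3, 7, 1].
+    msa_shape = feature_shape("msa", num_residues=7, msa_length=3, num_templates=0)
+    # 'aatype' is declared as [NUM_RES, 21], so this resolves to [7, 21].
+    aatype_shape = feature_shape("aatype", num_residues=7, msa_length=3, num_templates=0)
+    return msa_shape, aatype_shape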
+
+
+def parse_reshape_logic(parsed_features, features, num_template, key=None):
+ """Transforms parsed serial features to the correct shape."""
+    # Determine the number of residues and the number of MSA alignments.
+ num_residues = np.reshape(parsed_features['seq_length'].astype(np.int32), (-1,))[0]
+
+ if "num_alignments" in parsed_features:
+ num_msa = np.reshape(parsed_features["num_alignments"].astype(np.int32), (-1,))[0]
+ else:
+ num_msa = 0
+
+ if key is not None and "key" in features:
+ parsed_features["key"] = [key] # Expand dims from () to (1,).
+
+ # Reshape the arrays according to the sequence length and num alignments.
+ for k, v in parsed_features.items():
+ new_shape = feature_shape(
+ feature_name=k,
+ num_residues=num_residues,
+ msa_length=num_msa,
+ num_templates=num_template,
+ features=features)
+ new_shape_size = 1
+ for dim in new_shape:
+ new_shape_size *= dim
+
+ if np.size(v) != new_shape_size:
+ raise ValueError("the size of feature {} ({}) could not be reshaped into {}"
+ "".format(k, np.size(v), new_shape))
+
+ if "template" not in k:
+ # Make sure the feature we are reshaping is not empty.
+ if np.size(v) <= 0:
+                raise ValueError("The feature {} is empty.".format(k))
+ parsed_features[k] = np.reshape(v, new_shape)
+
+ return parsed_features
+
+
+def _make_features_metadata(feature_names):
+ """Makes a feature name to type and shape mapping from a list of names."""
+    # Exclude these string-valued features from the array metadata; they are handled separately.
+ required_features = ["sequence", "domain_name", "template_domain_names"]
+ feature_names = list(set(feature_names) - set(required_features))
+
+ features_metadata = {name: FEATURES.get(name) for name in feature_names}
+ return features_metadata
+
+
+def np_to_array_dict(np_example, features):
+ """Creates dict of arrays.
+
+ Args:
+ np_example: A dict of NumPy feature arrays.
+ features: A list of strings of feature names to be returned in the dataset.
+
+ Returns:
+ A dictionary of features mapping feature names to features. Only the given
+    features are returned; all others are filtered out.
+ """
+ features_metadata = _make_features_metadata(features)
+ array_dict = {k: v for k, v in np_example.items() if k in features_metadata}
+ if "template_domain_names" in np_example:
+ num_template = len(np_example["template_domain_names"])
+ else:
+ num_template = 0
+
+ # Ensures shapes are as expected. Needed for setting size of empty features
+ # e.g. when no template hits were found.
+ array_dict = parse_reshape_logic(array_dict, features_metadata, num_template)
+ array_dict['template_mask'] = np.ones([num_template], np.float32)
+ return array_dict
+
+
+class Feature:
+ """feature process"""
+
+ def __init__(self, cfg, raw_feature=None, is_training=False, model_cfg=None, is_evogen=False):
+ if raw_feature and isinstance(raw_feature, dict):
+ self.ensemble_num = 0
+ self.cfg = cfg
+ self.model_cfg = model_cfg
+ if 'deletion_matrix_int' in raw_feature:
+ raw_feature['deletion_matrix'] = (raw_feature.pop('deletion_matrix_int').astype(np.float32))
+ feature_names = cfg.common.unsupervised_features
+ if cfg.common.use_templates:
+ feature_names += cfg.common.template_features
+ self.is_training = is_training
+ self.is_evogen = is_evogen
+ if self.is_training:
+ feature_names += cfg.common.supervised_features
+ raw_feature = np_to_array_dict(np_example=raw_feature, features=feature_names)
+
+ for key in raw_feature:
+ setattr(self, key, raw_feature[key])
+
+ def non_ensemble(self, distillation=False, replace_proportion=0.0, use_templates=True):
+ """non ensemble"""
+ if self.is_evogen:
+ msa, msa_input = correct_msa_restypes(self.msa, self.deletion_matrix, self.is_evogen)
+ setattr(self, "msa", msa)
+ setattr(self, "msa_input", msa_input.astype(np.float32))
+ else:
+ setattr(self, "msa", correct_msa_restypes(self.msa))
+ setattr(self, "is_distillation", np.array(float(distillation), dtype=np.float32))
+ # convert int64 to int32
+ for k, v in vars(self).items():
+ if k not in ("ensemble_num", "is_training", "is_evogen", "cfg", "model_cfg"):
+ if v.dtype == np.int64:
+ setattr(self, k, v.astype(np.int32))
+ aatype = np.argmax(self.aatype, axis=-1)
+ setattr(self, "aatype", aatype.astype(np.int32))
+ if self.is_evogen:
+ query_input = np.concatenate((aatype[:, None], self.deletion_matrix[0]),
+ axis=-1).astype(np.int32)
+ setattr(self, "query_input", query_input.astype(np.float32))
+ data = vars(self)
+ for k in ['msa', 'num_alignments', 'seq_length', 'sequence', 'superfamily', 'deletion_matrix',
+ 'resolution', 'between_segment_residues', 'residue_index', 'template_all_atom_masks']:
+ if k in data:
+ final_dim = data[k].shape[-1]
+ if isinstance(final_dim, int) and final_dim == 1:
+ setattr(self, k, np.squeeze(data[k], axis=-1))
+ # Remove fake sequence dimension
+ for k in ['seq_length', 'num_alignments']:
+ if k in data:
+ setattr(self, k, data[k][0])
+
+ msa, aatype = randomly_replace_msa_with_unknown(self.msa, self.aatype, replace_proportion)
+ setattr(self, "msa", msa)
+ setattr(self, "aatype", aatype)
+ # seq_mask
+ seq_mask = np.ones(self.aatype.shape, dtype=np.float32)
+ setattr(self, "seq_mask", seq_mask)
+ # msa_mask and msa_row_mask
+ msa_mask = np.ones(self.msa.shape, dtype=np.float32)
+ msa_row_mask = np.ones(self.msa.shape[0], dtype=np.float32)
+ setattr(self, "msa_mask", msa_mask)
+ setattr(self, "msa_row_mask", msa_row_mask)
+ if 'hhblits_profile' not in data:
+ # Compute the profile for every residue (over all MSA sequences).
+ setattr(self, 'hhblits_profile', np.mean(one_hot(22, self.msa), axis=0))
+
+ if use_templates:
+ template_aatype = fix_templates_aatype(self.template_aatype)
+ setattr(self, "template_aatype", template_aatype)
+ template_pseudo_beta, template_pseudo_beta_mask = pseudo_beta_fn(self.template_aatype,
+ self.template_all_atom_positions,
+ self.template_all_atom_masks)
+ setattr(self, "template_pseudo_beta", template_pseudo_beta)
+ setattr(self, "template_pseudo_beta_mask", template_pseudo_beta_mask)
+
+ atom14_atom_exists, residx_atom14_to_atom37, residx_atom37_to_atom14, atom37_atom_exists = \
+ make_atom14_masks(self.aatype)
+ setattr(self, "atom14_atom_exists", atom14_atom_exists)
+ setattr(self, "residx_atom14_to_atom37", residx_atom14_to_atom37)
+ setattr(self, "residx_atom37_to_atom14", residx_atom37_to_atom14)
+ setattr(self, "atom37_atom_exists", atom37_atom_exists)
+
+ def ensemble(self, data, msa_fraction_per_block=0.3, randomize_num_blocks=True, num_blocks=5, keep_extra=True,
+ max_msa_clusters=124, masked_msa=None, uniform_prob=0.1, profile_prob=0.1, same_prob=0.1,
+ replace_fraction=0.15, msa_cluster_features=True, max_extra_msa=1024, crop_size=256, max_templates=4,
+ subsample_templates=True, fixed_size=True, seed=0):
+ """ensemble"""
+ self.ensemble_num += 1
+ if self.is_training:
+ keep_indices = block_delete_msa_indices(data["msa"], msa_fraction_per_block, randomize_num_blocks,
+ num_blocks)
+ for k in _MSA_FEATURE_NAMES:
+ if k in data:
+ data[k] = data[k][keep_indices]
+ # exist numpy random op
+ is_sel, not_sel_seq, sel_seq = sample_msa(data["msa"], max_msa_clusters)
+ for k in _MSA_FEATURE_NAMES:
+ if k in data:
+ if keep_extra and not is_sel:
+ new_shape = list(data[k].shape)
+ new_shape[0] = 1
+ data['extra_' + k] = np.zeros(new_shape)
+ elif keep_extra and is_sel:
+ data['extra_' + k] = data[k][not_sel_seq]
+ if k == 'msa':
+ data['extra_msa'] = data['extra_msa'].astype(np.int32)
+ data[k] = data[k][sel_seq]
+ if masked_msa:
+ if self.is_evogen:
+ make_masked_msa_result = make_masked_msa(
+ data["msa"], data["hhblits_profile"],
+ uniform_prob, profile_prob,
+ same_prob,
+ replace_fraction,
+ data['residue_index'], data['msa_mask'], self.is_evogen)
+ data["bert_mask"], data["true_msa"], data["msa"], data["additional_input"] = make_masked_msa_result
+ data["additional_input"] = data["additional_input"].astype(np.float32)
+ else:
+ data["bert_mask"], data["true_msa"], data["msa"] = make_masked_msa(data["msa"], data["hhblits_profile"],
+ uniform_prob, profile_prob,
+ same_prob,
+ replace_fraction)
+
+ if msa_cluster_features:
+ data["extra_cluster_assignment"] = nearest_neighbor_clusters(data["msa_mask"], data["msa"],
+ data["extra_msa_mask"], data["extra_msa"])
+            data["cluster_profile"], data["cluster_deletion_mean"] = summarize_clusters(
+                data["msa"], data["msa_mask"], data["extra_cluster_assignment"],
+                data["extra_msa_mask"], data["extra_msa"], data["extra_deletion_matrix"],
+                data["deletion_matrix"])
+
+ if max_extra_msa:
+ select_indices = crop_extra_msa(data["extra_msa"], max_extra_msa)
+ if select_indices:
+ for k in _MSA_FEATURE_NAMES:
+ if 'extra_' + k in data:
+ data['extra_' + k] = data['extra_' + k][select_indices]
+ else:
+ for k in _MSA_FEATURE_NAMES:
+ if 'extra_' + k in data:
+ del data['extra_' + k]
+ data["extra_has_deletion"], data["extra_deletion_value"], data["msa_feat"], data["target_feat"] = make_msa_feat(
+ data["between_segment_residues"], data["aatype"], data["msa"], data["deletion_matrix"],
+ data["cluster_deletion_mean"], data["cluster_profile"], data["extra_deletion_matrix"])
+
+ if fixed_size:
+ data = {k: v for k, v in data.items() if k in feature_list}
+
+ num_res_crop_size, num_templates_crop_size_int, num_res_crop_start, num_res_crop_size_int, \
+ templates_crop_start, templates_select_indices = random_crop_to_size(
+ data["seq_length"], data["template_mask"], crop_size, max_templates,
+ subsample_templates, seed, True)
+ for k, v in data.items():
+ if k not in feature_list or ('template' not in k and NUM_RES not in feature_list.get(k)):
+ continue
+
+ # randomly permute the templates before cropping them.
+ if k.startswith('template') and subsample_templates:
+ v = v[templates_select_indices]
+
+ crop_sizes = []
+ crop_starts = []
+ for i, (dim_size, dim) in enumerate(zip(feature_list.get(k), v.shape)):
+ is_num_res = (dim_size == NUM_RES)
+ if i == 0 and k.startswith('template'):
+ crop_size_ = num_templates_crop_size_int
+ crop_start = templates_crop_start
+ else:
+ crop_start = num_res_crop_start if is_num_res else 0
+ crop_size_ = (num_res_crop_size_int if is_num_res else (-1 if dim is None else dim))
+ crop_sizes.append(crop_size_)
+ crop_starts.append(crop_start)
+ if len(v.shape) == 1:
+ data[k] = v[crop_starts[0]:crop_starts[0] + crop_sizes[0]]
+ elif len(v.shape) == 2:
+ data[k] = v[crop_starts[0]:crop_starts[0] + crop_sizes[0],
+ crop_starts[1]:crop_starts[1] + crop_sizes[1]]
+ elif len(v.shape) == 3:
+ data[k] = v[crop_starts[0]:crop_starts[0] + crop_sizes[0],
+ crop_starts[1]:crop_starts[1] + crop_sizes[1],
+ crop_starts[2]:crop_starts[2] + crop_sizes[2]]
+ else:
+ data[k] = v[crop_starts[0]:crop_starts[0] + crop_sizes[0],
+ crop_starts[1]:crop_starts[1] + crop_sizes[1],
+ crop_starts[2]:crop_starts[2] + crop_sizes[2],
+ crop_starts[3]:crop_starts[3] + crop_sizes[3]]
+
+ data["seq_length"] = num_res_crop_size
+
+ pad_size_map = {
+ NUM_RES: crop_size,
+ NUM_MSA_SEQ: max_msa_clusters,
+ NUM_EXTRA_SEQ: max_extra_msa,
+ NUM_TEMPLATES: max_templates,
+ }
+
+ for k, v in data.items():
+ if k == 'extra_cluster_assignment':
+ continue
+ shape = list(v.shape)
+ schema = feature_list.get(k)
+ assert len(shape) == len(
+ schema), f'Rank mismatch between shape and shape schema for {k}: {shape} vs {schema}'
+
+ pad_size = [pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)]
+ padding = [(0, p - v.shape[i]) for i, p in enumerate(pad_size)]
+ if padding:
+ data[k] = np.pad(v, padding)
+                    data[k] = data[k].reshape(pad_size)
+ else:
+ for k, v in data.items():
+ if k.startswith('template_'):
+ data[k] = v[:max_templates]
+ if self.is_evogen:
+ data["random_data"], data["context_mask"] = generate_random_sample(self.cfg, self.model_cfg)
+ data["context_mask"] = data["context_mask"].astype(np.float32)
+ return data
+
+ def process_res(self, features, res, dtype):
+ """process result"""
+ arrays, prev_pos, prev_msa_first_row, prev_pair = res
+ if self.is_evogen:
+ evogen_keys = ["target_feat", "seq_mask", "aatype", "residx_atom37_to_atom14", "atom37_atom_exists",
+ "residue_index", "msa_mask", "msa_input", "query_input", "additional_input", "random_data",
+ "context_mask"]
+ arrays = [features[key] for key in evogen_keys]
+ arrays = [array.astype(dtype) if array.dtype == "float64" else array for array in arrays]
+ arrays = [array.astype(dtype) if array.dtype == "float32" else array for array in arrays]
+ res = [arrays, prev_pos, prev_msa_first_row, prev_pair]
+ return res
+ if self.is_training:
+ label_keys = ["pseudo_beta", "pseudo_beta_mask", "all_atom_mask",
+ "true_msa", "bert_mask", "residue_index", "seq_mask",
+ "atom37_atom_exists", "aatype", "residx_atom14_to_atom37",
+ "atom14_atom_exists", "backbone_affine_tensor", "backbone_affine_mask",
+ "atom14_gt_positions", "atom14_alt_gt_positions",
+ "atom14_atom_is_ambiguous", "atom14_gt_exists", "atom14_alt_gt_exists",
+ "all_atom_positions", "rigidgroups_gt_frames", "rigidgroups_gt_exists",
+ "rigidgroups_alt_gt_frames", "torsion_angles_sin_cos", "chi_mask"]
+ label_arrays = [features[key] for key in label_keys]
+ label_arrays = [array[0] for array in label_arrays]
+ label_arrays = [array.astype(dtype) if array.dtype == "float64" else array for array in label_arrays]
+ label_arrays = [array.astype(dtype) if array.dtype == "float32" else array for array in label_arrays]
+ res = [arrays, prev_pos, prev_msa_first_row, prev_pair, label_arrays]
+ return res
+ return res
+
+ def pipeline(self, cfg, mixed_precision=True, seed=0):
+ """feature process pipeline"""
+ self.non_ensemble(cfg.common.distillation, cfg.common.replace_proportion, cfg.common.use_templates)
+ non_ensemble_data = vars(self).copy()
+ max_msa_clusters = cfg.eval.max_msa_clusters
+ if cfg.common.reduce_msa_clusters_by_max_templates and not self.is_evogen:
+ max_msa_clusters = cfg.eval.max_msa_clusters - cfg.eval.max_templates
+ non_ensemble_data_copy = non_ensemble_data.copy()
+ protein = self.ensemble(non_ensemble_data_copy,
+ cfg.block_deletion.msa_fraction_per_block,
+ cfg.block_deletion.randomize_num_blocks,
+ cfg.block_deletion.num_blocks,
+ cfg.eval.keep_extra,
+ max_msa_clusters,
+ cfg.common.masked_msa.use_masked_msa,
+ cfg.common.masked_msa.uniform_prob,
+ cfg.common.masked_msa.profile_prob,
+ cfg.common.masked_msa.same_prob,
+ cfg.eval.masked_msa_replace_fraction,
+ cfg.common.msa_cluster_features,
+ cfg.common.max_extra_msa,
+ cfg.eval.crop_size,
+ cfg.eval.max_templates,
+ cfg.eval.subsample_templates,
+ cfg.eval.fixed_size, seed)
+
+ num_ensemble = cfg.eval.num_ensemble
+ num_recycle = cfg.common.num_recycle
+ if cfg.common.resample_msa_in_recycling:
+ num_ensemble *= num_recycle
+ result_array = {x: () for x in protein.keys()}
+ if num_ensemble > 1:
+ for _ in range(num_ensemble):
+ non_ensemble_data_copy = non_ensemble_data.copy()
+ data_t = self.ensemble(non_ensemble_data_copy,
+ cfg.block_deletion.msa_fraction_per_block,
+ cfg.block_deletion.randomize_num_blocks,
+ cfg.block_deletion.num_blocks, cfg.eval.keep_extra,
+ max_msa_clusters, cfg.common.masked_msa.use_masked_msa,
+ cfg.common.masked_msa.uniform_prob, cfg.common.masked_msa.profile_prob,
+ cfg.common.masked_msa.same_prob, cfg.eval.masked_msa_replace_fraction,
+ cfg.common.msa_cluster_features, cfg.common.max_extra_msa,
+ cfg.eval.crop_size, cfg.eval.max_templates, cfg.eval.subsample_templates,
+ cfg.eval.fixed_size, seed)
+ for key in protein.keys():
+ result_array[key] += (data_t[key][None],)
+ for key in protein.keys():
+ result_array[key] = np.concatenate(result_array[key], axis=0)
+ else:
+ result_array = {key: protein[key][None] for key in protein.keys()}
+ features = {k: v for k, v in result_array.items() if v.dtype != 'O'}
+ extra_msa_length = cfg.common.max_extra_msa
+ for key in ["extra_msa", "extra_has_deletion", "extra_deletion_value", "extra_msa_mask"]:
+ features[key] = features[key][:, :extra_msa_length]
+ input_keys = ['target_feat', 'msa_feat', 'msa_mask', 'seq_mask', 'aatype', 'template_aatype',
+ 'template_all_atom_masks', 'template_all_atom_positions', 'template_mask',
+ 'template_pseudo_beta_mask', 'template_pseudo_beta',
+ 'extra_msa', 'extra_has_deletion', 'extra_deletion_value', 'extra_msa_mask',
+ 'residx_atom37_to_atom14', 'atom37_atom_exists', 'residue_index']
+
+ dtype = np.float32
+ if mixed_precision:
+ dtype = np.float16
+ arrays = [features[key] for key in input_keys]
+ arrays = [array.astype(dtype) if array.dtype == "float64" else array for array in arrays]
+ arrays = [array.astype(dtype) if array.dtype == "float32" else array for array in arrays]
+ prev_pos = np.zeros([cfg.eval.crop_size, 37, 3]).astype(dtype)
+ prev_msa_first_row = np.zeros([cfg.eval.crop_size, 256]).astype(dtype)
+ prev_pair = np.zeros([cfg.eval.crop_size, cfg.eval.crop_size, 128]).astype(dtype)
+ res = [arrays, prev_pos, prev_msa_first_row, prev_pair]
+ res = self.process_res(features, res, dtype)
+ return res
diff --git a/MindSPONGE/applications/research/FAAST/data/protein_feature.py b/MindSPONGE/applications/research/FAAST/data/protein_feature.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca683bcb893f615535a13f205500385a7ea66b11
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/data/protein_feature.py
@@ -0,0 +1,166 @@
+# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+protein feature generation module.
+"""
+import os
+import stat
+import pickle
+import numpy as np
+from data.hhsearch import HHSearch
+from data.msa_query import MmseqQuery
+from data.parsers import parse_fasta, parse_hhr, parse_a3m
+from data.templates import TemplateHitFeaturizer
+from mindsponge.common import residue_constants
+from search import colabsearch
+
+
+def make_msa_features(msas, deletion_matrices):
+ """Constructs a feature dict of MSA features."""
+ if not msas:
+ raise ValueError('At least one MSA must be provided.')
+
+ int_msa = []
+ deletion_matrix = []
+ seen_sequences = set()
+ for msa_index, msa in enumerate(msas):
+ if not msa:
+ raise ValueError(f'MSA {msa_index} must contain at least one sequence.')
+ for sequence_index, sequence in enumerate(msa):
+ if sequence in seen_sequences:
+ continue
+ seen_sequences.add(sequence)
+ int_msa.append([residue_constants.HHBLITS_AA_TO_ID[res] for res in sequence])
+ deletion_matrix.append(deletion_matrices[msa_index][sequence_index])
+
+ num_res = len(msas[0][0])
+ num_alignments = len(int_msa)
+ features = {'deletion_matrix_int': np.array(deletion_matrix, dtype=np.int32),
+ 'deletion_matrix_int_all_seq': np.array(deletion_matrix, dtype=np.int32),
+ 'msa': np.array(int_msa, dtype=np.int32),
+ 'msa_all_seq': np.array(int_msa, dtype=np.int32),
+ 'num_alignments': np.array([num_alignments] * num_res, dtype=np.int32),
+ 'msa_species_identifiers_all_seq': np.array([b''] * num_alignments)}
+ return features
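+
+
+# Illustrative sketch only (not called anywhere in FAAST): a single hypothetical
+# MSA with two aligned sequences and no deletions. Duplicate sequences across
+# MSAs would be dropped through the seen_sequences set above.
+def _example_msa_features():
+    """Builds MSA features for a toy alignment."""
+    msas = (['MKTAYIA', 'MKTAYLA'],)
+    deletion_matrices = ([[0] * 7, [0] * 7],)
+    return make_msa_features(msas=msas, deletion_matrices=deletion_matrices)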
+
+
+def make_sequence_features(sequence: str, description: str, num_res: int):
+ """Constructs a feature dict of sequence features."""
+ features = {'aatype': residue_constants.sequence_to_onehot(sequence=sequence,
+ mapping=residue_constants.restype_order_with_x,
+ map_unknown_to_x=True),
+ 'between_segment_residues': np.zeros((num_res,), dtype=np.int32),
+ 'domain_name': np.array([description.encode('utf-8')], dtype=np.object_),
+ 'residue_index': np.array(range(num_res), dtype=np.int32),
+ 'seq_length': np.array([num_res] * num_res, dtype=np.int32),
+ 'sequence': np.array([sequence.encode('utf-8')], dtype=np.object_)}
+ return features
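+
+
+# Illustrative sketch only (not called anywhere in FAAST): sequence features for
+# a short hypothetical peptide. 'aatype' is a one-hot [num_res, 21] array and
+# 'residue_index' simply counts 0..num_res-1.
+def _example_sequence_features():
+    """Builds sequence features for a toy sequence."""
+    sequence = 'MKTAYIA'
+    return make_sequence_features(sequence=sequence,
+                                  description='toy_example',
+                                  num_res=len(sequence))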
+
+
+class RawFeatureGenerator:
+ """Runs the alignment tools"""
+
+ def __init__(self, database_search_config, a3m_path, templatepath, use_custom, use_template, max_hits=20,
+ msa_length=512):
+ """Search the a3m info for a given FASTA file."""
+
+ self.template_path = templatepath
+ self.use_template = use_template
+ self.template_mmcif_dir = f"{self.template_path}/"
+ self.max_template_date = database_search_config.max_template_date
+ self.kalign_binary_path = database_search_config.kalign_binary_path
+ self.hhsearch_binary_path = database_search_config.hhsearch_binary_path
+ self.pdb70_database_path = f"{self.template_path}/pdb70"
+ self.a3m_result_path = a3m_path
+ self.database_envdb_dir = database_search_config.database_envdb_dir
+ self.mmseqs_binary = database_search_config.mmseqs_binary
+ self.uniref30_path = database_search_config.uniref30_path
+ self.max_hits = max_hits
+ self.msa_length = msa_length
+
+ self.msa_query = MmseqQuery(database_envdb_dir=self.database_envdb_dir,
+ mmseqs_binary=self.mmseqs_binary,
+ uniref30_path=self.uniref30_path,
+ result_path=self.a3m_result_path)
+ self.use_custom = use_custom
+
+ def monomer_feature_generate(self, fasta_path, prot_name):
+ """protein raw feature generation"""
+ with open(fasta_path) as f:
+ input_fasta_str = f.read()
+ input_seqs, input_descs = parse_fasta(input_fasta_str)
+ if not self.use_custom:
+ colabsearch(input_seqs, self.a3m_result_path, self.template_path)
+ if self.use_template:
+ hhsearch_pdb70_runner = HHSearch(binary_path=self.hhsearch_binary_path,
+ databases=[self.pdb70_database_path])
+ template_featurizer = TemplateHitFeaturizer(mmcif_dir=self.template_mmcif_dir,
+ max_template_date=self.max_template_date,
+ max_hits=self.max_hits,
+ kalign_binary_path=self.kalign_binary_path,
+ release_dates_path=None)
+ if len(input_seqs) != 1:
+ raise ValueError(f'More than one input sequence found in {fasta_path}.')
+ input_sequence = input_seqs[0]
+ input_description = input_descs[0]
+
+ num_res = len(input_sequence)
+
+ a3m_lines = self.msa_query.aligned_a3m_files(self.a3m_result_path)
+
+ if self.use_template:
+ hhsearch_result = hhsearch_pdb70_runner.query(a3m_lines)
+ hhsearch_hits = parse_hhr(hhsearch_result)
+ templates_result = template_featurizer.get_templates(
+ query_sequence=input_sequence,
+ query_pdb_code=None,
+ query_release_date=None,
+ hhr_hits=hhsearch_hits)
+
+ msas, deletion_matrices = parse_a3m(a3m_lines)
+
+ sequence_features = make_sequence_features(
+ sequence=input_sequence,
+ description=input_description,
+ num_res=num_res)
+ msa_features = make_msa_features(msas=(msas,), deletion_matrices=(deletion_matrices,))
+ features = {}
+        shape0 = 20  # number of placeholder templates
+        shape1 = 22  # template aatype classes (20 amino acids + unknown + gap)
+        shape2 = 37  # atom37 atom types
+        shape4 = 3   # xyz coordinates
+        shape5 = 1   # singleton trailing dimension
+ features["template_aatype"] = np.zeros((shape0, num_res, shape1))
+ features["template_all_atom_masks"] = np.zeros((shape0, num_res, shape2))
+ features["template_all_atom_positions"] = np.zeros((shape0, num_res, shape2, shape4))
+ features["template_domain_names"] = np.zeros((shape0,))
+ features["template_e_value"] = np.zeros((shape0, shape5))
+ features["template_neff"] = np.zeros((shape0, shape5))
+ features["template_prob_true"] = np.zeros((shape0, shape5))
+ features["template_similarity"] = np.zeros((shape0, shape5))
+ features["template_sequence"] = np.zeros((shape0, shape5))
+ features["template_sum_probs"] = np.zeros((shape0, shape5))
+ features["template_confidence_scores"] = np.zeros((shape0, num_res))
+ if self.use_template:
+ features = templates_result.features
+
+ feature_dict = {**sequence_features, **msa_features, **features}
+ os.makedirs("./pkl_file/", exist_ok=True)
+ os_flags = os.O_RDWR | os.O_CREAT
+ os_modes = stat.S_IRWXU
+ with os.fdopen(os.open(f"./pkl_file/{prot_name}.pkl", os_flags, os_modes), "wb") as fout:
+ pickle.dump(feature_dict, fout)
+ f.close()
+ return feature_dict
diff --git a/MindSPONGE/applications/research/FAAST/data/templates.py b/MindSPONGE/applications/research/FAAST/data/templates.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3dffa739244b4fd055fccabe63a86f1762dbb36
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/data/templates.py
@@ -0,0 +1,899 @@
+# Copyright 2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+'''template'''
+import datetime
+import glob
+import os
+import re
+import dataclasses
+from typing import Any, Mapping, Optional, Sequence, Tuple
+from absl import logging
+import numpy as np
+
+from mindsponge.common import residue_constants
+from data.kalign import Kalign
+from data.parsers import parse_mmcif, parse_a3m
+
+
+class Error(Exception):
+ """Base class for exceptions."""
+
+
+class NoChainsError(Error):
+ """An error indicating that template mmCIF didn't have any chains."""
+
+
+class SequenceNotInTemplateError(Error):
+ """An error indicating that template mmCIF didn't contain the sequence."""
+
+
+class NoAtomDataInTemplateError(Error):
+ """An error indicating that template mmCIF didn't contain atom positions."""
+
+
+class TemplateAtomMaskAllZerosError(Error):
+ """An error indicating that template mmCIF had all atom positions masked."""
+
+
+class QueryToTemplateAlignError(Error):
+ """An error indicating that the query can't be aligned to the template."""
+
+
+class CaDistanceError(Error):
+ """An error indicating that a CA atom distance exceeds a threshold."""
+
+
+class MultipleChainsError(Error):
+ """An error indicating that multiple chains were found for a given ID."""
+
+
+# Prefilter exceptions.
+class PrefilterError(Exception):
+ """A base class for template prefilter exceptions."""
+
+
+class DateError(PrefilterError):
+ """An error indicating that the hit date was after the max allowed date."""
+
+
+class PdbIdError(PrefilterError):
+ """An error indicating that the hit PDB ID was identical to the query."""
+
+
+class AlignRatioError(PrefilterError):
+ """An error indicating that the hit align ratio to the query was too small."""
+
+
+class DuplicateError(PrefilterError):
+ """An error indicating that the hit was an exact subsequence of the query."""
+
+
+class LengthError(PrefilterError):
+ """An error indicating that the hit was too short."""
+
+
+TEMPLATE_FEATURES = {
+ 'template_aatype': np.float32,
+ 'template_all_atom_masks': np.float32,
+ 'template_all_atom_positions': np.float32,
+    'template_domain_names': np.object_,
+ 'template_e_value': np.float32,
+ 'template_neff': np.float32,
+ 'template_prob_true': np.float32,
+    'template_release_date': np.object_,
+ 'template_score': np.float32,
+ 'template_similarity': np.float32,
+    'template_sequence': np.object_,
+ 'template_sum_probs': np.float32,
+ 'template_confidence_scores': np.int64
+}
+
+
+def _get_pdb_id_and_chain(hit):
+ """Returns PDB id and chain id for an HHSearch Hit."""
+ # PDB ID: 4 letters. Chain ID: 1+ alphanumeric letters or "." if unknown.
+ id_match = re.match(r'[a-zA-Z\d]{4}_[a-zA-Z0-9.]+', hit.name)
+ if not id_match:
+ raise ValueError(f'hit.name did not start with PDBID_chain: {hit.name}')
+ pdb_id, chain_id = id_match.group(0).split('_')
+ return pdb_id.lower(), chain_id
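+
+
+# Illustrative sketch only (not called anywhere in FAAST): hit names look like
+# '<pdbid>_<chain> <description>'; the regex anchors at the start, so anything
+# after the first whitespace is ignored. The hit object here is hypothetical.
+def _example_pdb_id_and_chain():
+    """Parses a toy HHSearch hit name."""
+    @dataclasses.dataclass
+    class _FakeHit:
+        name: str
+    return _get_pdb_id_and_chain(_FakeHit(name='4HHB_A Hemoglobin alpha'))  # -> ('4hhb', 'A')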
+
+
+def _is_after_cutoff(
+ pdb_id: str,
+ release_dates: Mapping[str, datetime.datetime],
+ release_date_cutoff: Optional[datetime.datetime]) -> bool:
+ """Checks if the template date is after the release date cutoff.
+
+ Args:
+ pdb_id: 4 letter pdb code.
+ release_dates: Dictionary mapping PDB ids to their structure release dates.
+ release_date_cutoff: Max release date that is valid for this query.
+
+ Returns:
+ True if the template release date is after the cutoff, False otherwise.
+ """
+ if release_date_cutoff is None:
+ raise ValueError('The release_date_cutoff must not be None.')
+ if pdb_id in release_dates:
+ return release_dates[pdb_id] > release_date_cutoff
+ return False
+
+
+def _parse_release_dates(path: str) -> Mapping[str, datetime.datetime]:
+ """Parses release dates file, returns a mapping from PDBs to release dates."""
+ if path.endswith('txt'):
+ release_dates = {}
+ with open(path, 'r') as f:
+ for line in f:
+ pdb_id, date = line.split(':')
+ date = date.strip()
+ # Python 3.6 doesn't have datetime.date.fromisoformat() which is about 90x faster than strptime.
+ # However, splitting the string manually is about 10x faster than strptime.
+ release_dates[pdb_id.strip()] = \
+ datetime.datetime(year=int(date[:4]), month=int(date[5:7]), day=int(date[8:10]))
+ return release_dates
+ raise ValueError('Invalid format of the release date file %s.' % path)
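+
+
+# Illustrative sketch only (not called anywhere in FAAST): one hypothetical line
+# of the release-dates .txt file, parsed the same way as in _parse_release_dates.
+def _example_release_date_line():
+    """Parses the toy line '1abc: 2019-05-01' into a {pdb_id: datetime} entry."""
+    pdb_id, date = '1abc: 2019-05-01'.split(':')
+    date = date.strip()
+    return {pdb_id.strip(): datetime.datetime(year=int(date[:4]),
+                                              month=int(date[5:7]),
+                                              day=int(date[8:10]))}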
+
+
+def _assess_hhsearch_hit(
+ hit,
+ hit_pdb_code,
+ query_sequence,
+ query_pdb_code,
+ release_dates,
+ release_date_cutoff,
+ max_subsequence_ratio=0.95,
+ min_align_ratio=0.1):
+ """Determines if template is valid (without parsing the template mmcif file).
+
+ Args:
+ hit: HhrHit for the template.
+ hit_pdb_code: The 4 letter pdb code of the template hit. This might be
+ different from the value in the actual hit since the original pdb might
+ have become obsolete.
+ query_sequence: Amino acid sequence of the query.
+ query_pdb_code: 4 letter pdb code of the query.
+ release_dates: Dictionary mapping pdb codes to their structure release
+ dates.
+ release_date_cutoff: Max release date that is valid for this query.
+ max_subsequence_ratio: Exclude any exact matches with this much overlap.
+ min_align_ratio: Minimum overlap between the template and query.
+
+ Returns:
+ True if the hit passed the prefilter. Raises an exception otherwise.
+
+ Raises:
+ DateError: If the hit date was after the max allowed date.
+ PdbIdError: If the hit PDB ID was identical to the query.
+ AlignRatioError: If the hit align ratio to the query was too small.
+ DuplicateError: If the hit was an exact subsequence of the query.
+ LengthError: If the hit was too short.
+ """
+ aligned_cols = hit.aligned_cols
+ align_ratio = aligned_cols / len(query_sequence)
+
+ template_sequence = hit.hit_sequence.replace('-', '')
+ length_ratio = float(len(template_sequence)) / len(query_sequence)
+
+ # Check whether the template is a large subsequence or duplicate of original
+ # query. This can happen due to duplicate entries in the PDB database.
+ duplicate = (template_sequence in query_sequence and length_ratio > max_subsequence_ratio)
+ if _is_after_cutoff(hit_pdb_code, release_dates, release_date_cutoff):
+ raise DateError(f'Date ({release_dates[hit_pdb_code]}) > max template date ({release_date_cutoff}).')
+
+ if query_pdb_code is not None:
+ if query_pdb_code.lower() == hit_pdb_code.lower():
+ raise PdbIdError('PDB code identical to Query PDB code.')
+
+ if align_ratio <= min_align_ratio:
+ raise AlignRatioError(f'Proportion of residues aligned to query too small. Align ratio: {align_ratio}.')
+
+ if duplicate:
+ raise DuplicateError(f'Template is an exact subsequence of query with large coverage.'
+ f' Length ratio: {length_ratio}.')
+
+ if len(template_sequence) < 10:
+ raise LengthError(f'Template too short. Length: {len(template_sequence)}.')
+
+ return True
+
+
+def _find_template_in_pdb(template_chain_id, template_sequence, mmcif_object):
+ """Tries to find the template chain in the given pdb file.
+
+ This method tries the three following things in order:
+ 1. Tries if there is an exact match in both the chain ID and the sequence.
+ If yes, the chain sequence is returned. Otherwise:
+ 2. Tries if there is an exact match only in the sequence.
+ If yes, the chain sequence is returned. Otherwise:
+ 3. Tries if there is a fuzzy match (X = wildcard) in the sequence.
+ If yes, the chain sequence is returned.
+ If none of these succeed, a SequenceNotInTemplateError is thrown.
+
+ Args:
+ template_chain_id: The template chain ID.
+ template_sequence: The template chain sequence.
+ mmcif_object: The PDB object to search for the template in.
+
+ Returns:
+ A tuple with:
+ * The chain sequence that was found to match the template in the PDB object.
+ * The ID of the chain that is being returned.
+ * The offset where the template sequence starts in the chain sequence.
+
+ Raises:
+ SequenceNotInTemplateError: If no match is found after the steps described
+ above.
+ """
+ # Try if there is an exact match in both the chain ID and the
+ # (sub)sequence.
+ pdb_id = mmcif_object.file_id
+ chain_sequence = mmcif_object.chain_to_seqres.get(template_chain_id)
+ if chain_sequence and (template_sequence in chain_sequence):
+ logging.info('Found an exact template match %s_%s.', pdb_id, template_chain_id)
+ mapping_offset = chain_sequence.find(template_sequence)
+ return chain_sequence, template_chain_id, mapping_offset
+
+ # Try if there is an exact match in the (sub)sequence only.
+ for chain_id, chain_sequence in mmcif_object.chain_to_seqres.items():
+ if chain_sequence and (template_sequence in chain_sequence):
+ logging.info(f'Found a sequence-only match {pdb_id}_{chain_id}.')
+ mapping_offset = chain_sequence.find(template_sequence)
+ return chain_sequence, chain_id, mapping_offset
+
+ # Return a chain sequence that fuzzy matches (X = wildcard) the template.
+ # Make parentheses unnamed groups (?:_) to avoid the 100 named groups
+ # limit.
+ regex = ['.' if aa == 'X' else '(?:%s|X)' % aa for aa in template_sequence]
+ regex = re.compile(''.join(regex))
+ for chain_id, chain_sequence in mmcif_object.chain_to_seqres.items():
+ match = re.search(regex, chain_sequence)
+ if match:
+ logging.info(f'Found a fuzzy sequence-only match {pdb_id}_{chain_id}.')
+ mapping_offset = match.start()
+ return chain_sequence, chain_id, mapping_offset
+
+ # No hits, raise an error.
+ raise SequenceNotInTemplateError(
+ 'Could not find the template sequence in %s_%s. Template sequence: %s, '
+ 'chain_to_seqres: %s' % (pdb_id, template_chain_id, template_sequence, mmcif_object.chain_to_seqres))
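+
+
+# Illustrative sketch only (not called anywhere in FAAST): the fuzzy pattern
+# built above for a hypothetical template sequence 'AXC'. 'X' in the template
+# matches any residue, and every other position also accepts 'X' in the chain.
+def _example_fuzzy_template_regex():
+    """Returns the regex string built for template 'AXC'."""
+    template_sequence = 'AXC'
+    regex = ['.' if aa == 'X' else '(?:%s|X)' % aa for aa in template_sequence]
+    return ''.join(regex)  # '(?:A|X).(?:C|X)'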
+
+
+def _realign_pdb_template_to_query(
+ old_template_sequence,
+ template_chain_id,
+ mmcif_object,
+ old_mapping,
+ kalign_binary_path):
+ """Aligns template from the mmcif_object to the query.
+
+ In case PDB70 contains a different version of the template sequence, we need
+ to perform a realignment to the actual sequence that is in the mmCIF file.
+ This method performs such realignment, but returns the new sequence and
+ mapping only if the sequence in the mmCIF file is 90% identical to the old
+ sequence.
+
+ Note that the old_template_sequence comes from the hit, and contains only that
+ part of the chain that matches with the query while the new_template_sequence
+ is the full chain.
+
+ Args:
+ old_template_sequence: The template sequence that was returned by the PDB
+ template search (typically done using HHSearch).
+ template_chain_id: The template chain id was returned by the PDB template
+ search (typically done using HHSearch). This is used to find the right
+ chain in the mmcif_object chain_to_seqres mapping.
+ mmcif_object: A mmcif_object which holds the actual template data.
+ old_mapping: A mapping from the query sequence to the template sequence.
+ This mapping will be used to compute the new mapping from the query
+ sequence to the actual mmcif_object template sequence by aligning the
+ old_template_sequence and the actual template sequence.
+ kalign_binary_path: The path to a kalign executable.
+
+ Returns:
+ A tuple (new_template_sequence, new_query_to_template_mapping) where:
+ * new_template_sequence is the actual template sequence that was found in
+ the mmcif_object.
+ * new_query_to_template_mapping is the new mapping from the query to the
+ actual template found in the mmcif_object.
+
+ Raises:
+ QueryToTemplateAlignError:
+ * If there was an error thrown by the alignment tool.
+ * Or if the actual template sequence differs by more than 10% from the
+ old_template_sequence.
+ """
+ aligner = Kalign(binary_path=kalign_binary_path)
+ new_template_sequence = mmcif_object.chain_to_seqres.get(template_chain_id, '')
+
+ # Sometimes the template chain id is unknown. But if there is only a single
+ # sequence within the mmcif_object, it is safe to assume it is that one.
+ if not new_template_sequence:
+ if len(mmcif_object.chain_to_seqres) == 1:
+ logging.info(f'Could not find {template_chain_id} in {mmcif_object.file_id}, but there is only 1 sequence,'
+ f' so using that one.')
+ new_template_sequence = list(mmcif_object.chain_to_seqres.values())[0]
+ else:
+ raise QueryToTemplateAlignError(
+ f'Could not find chain {template_chain_id} in {mmcif_object.file_id}. '
+ 'If there are no mmCIF parsing errors, it is possible it was not a '
+ 'protein chain.')
+
+ try:
+ (old_aligned_template, new_aligned_template), _ = \
+ parse_a3m(aligner.align([old_template_sequence, new_template_sequence]))
+ except Exception as e:
+ raise QueryToTemplateAlignError(
+ 'Could not align old template %s to template %s (%s_%s). Error: %s' %
+ (old_template_sequence,
+ new_template_sequence,
+ mmcif_object.file_id,
+ template_chain_id,
+ str(e)))
+
+ logging.info(f'Old aligned template: {old_aligned_template}\nNew aligned template: {new_aligned_template}')
+
+ old_to_new_template_mapping = {}
+ old_template_index = -1
+ new_template_index = -1
+ num_same = 0
+ for old_template_aa, new_template_aa in zip(old_aligned_template, new_aligned_template):
+ if old_template_aa != '-':
+ old_template_index += 1
+ if new_template_aa != '-':
+ new_template_index += 1
+ if old_template_aa != '-' and new_template_aa != '-':
+ old_to_new_template_mapping[old_template_index] = new_template_index
+ if old_template_aa == new_template_aa:
+ num_same += 1
+
+    # Require at least 90% sequence identity with respect to the shorter of the sequences.
+ if float(num_same) / min(len(old_template_sequence), len(new_template_sequence)) < 0.9:
+ raise QueryToTemplateAlignError(
+ 'Insufficient similarity of the sequence in the database: %s to the '
+ 'actual sequence in the mmCIF file %s_%s: %s. We require at least '
+ '90 %% similarity wrt to the shorter of the sequences. This is not a '
+ 'problem unless you think this is a template that should be included.' %
+ (old_template_sequence, mmcif_object.file_id, template_chain_id,
+ new_template_sequence))
+
+ new_query_to_template_mapping = {}
+ for query_index, old_template_index in old_mapping.items():
+ new_query_to_template_mapping[query_index] = (old_to_new_template_mapping.get(old_template_index, -1))
+
+ new_template_sequence = new_template_sequence.replace('-', '')
+
+ return new_template_sequence, new_query_to_template_mapping
+
+
+def _check_residue_distances(all_positions: np.ndarray,
+ all_positions_mask: np.ndarray,
+ max_ca_ca_distance: float):
+ """Checks if the distance between unmasked neighbor residues is ok."""
+ ca_position = residue_constants.atom_order['CA']
+ prev_is_unmasked = False
+ prev_calpha = None
+ for i, (coords, mask) in enumerate(zip(all_positions, all_positions_mask)):
+ this_is_unmasked = bool(mask[ca_position])
+ if this_is_unmasked:
+ this_calpha = coords[ca_position]
+ if prev_is_unmasked:
+ distance = np.linalg.norm(this_calpha - prev_calpha)
+ if distance > max_ca_ca_distance:
+ raise CaDistanceError('The distance between residues %d and %d is %f > limit %f.' %
+ (i, i + 1, distance, max_ca_ca_distance))
+ prev_calpha = this_calpha
+ prev_is_unmasked = this_is_unmasked
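+
+
+# Illustrative sketch only (not called anywhere in FAAST): two hypothetical
+# residues whose CA atoms are 3.8 A apart pass a 10 A threshold; the check
+# raises CaDistanceError only when consecutive unmasked CA atoms are too far apart.
+def _example_residue_distance_check():
+    """Runs the CA-CA distance check on a toy two-residue structure."""
+    ca = residue_constants.atom_order['CA']
+    positions = np.zeros([2, residue_constants.atom_type_num, 3])
+    mask = np.zeros([2, residue_constants.atom_type_num])
+    positions[1, ca] = [3.8, 0.0, 0.0]
+    mask[:, ca] = 1.0
+    _check_residue_distances(positions, mask, max_ca_ca_distance=10.0)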
+
+
+def _get_atom_positions(
+ mmcif_object,
+ auth_chain_id,
+ max_ca_ca_distance) -> Tuple[np.ndarray, np.ndarray]:
+ """Gets atom positions and mask from a list of Biopython Residues."""
+ num_res = len(mmcif_object.chain_to_seqres[auth_chain_id])
+
+ relevant_chains = [c for c in mmcif_object.structure.get_chains() if c.id == auth_chain_id]
+ if len(relevant_chains) != 1:
+ raise MultipleChainsError(f'Expected exactly one chain in structure with id {auth_chain_id}.')
+ chain = relevant_chains[0]
+
+ all_positions = np.zeros([num_res, residue_constants.atom_type_num, 3])
+ all_positions_mask = np.zeros([num_res, residue_constants.atom_type_num], dtype=np.int64)
+ for res_index in range(num_res):
+ pos = np.zeros([residue_constants.atom_type_num, 3], dtype=np.float32)
+ mask = np.zeros([residue_constants.atom_type_num], dtype=np.float32)
+ res_at_position = mmcif_object.seqres_to_structure[auth_chain_id][res_index]
+ if not res_at_position.is_missing:
+ res = chain[(res_at_position.hetflag,
+ res_at_position.position.residue_number,
+ res_at_position.position.insertion_code)]
+ for atom in res.get_atoms():
+ atom_name = atom.get_name()
+ x, y, z = atom.get_coord()
+ if atom_name in residue_constants.atom_order.keys():
+ pos[residue_constants.atom_order[atom_name]] = [x, y, z]
+ mask[residue_constants.atom_order[atom_name]] = 1.0
+ elif atom_name.upper() == 'SE' and res.get_resname() == 'MSE':
+ # Put the coordinates of the selenium atom in the sulphur
+ # column.
+ pos[residue_constants.atom_order['SD']] = [x, y, z]
+ mask[residue_constants.atom_order['SD']] = 1.0
+
+ all_positions[res_index] = pos
+ all_positions_mask[res_index] = mask
+ _check_residue_distances(all_positions, all_positions_mask, max_ca_ca_distance)
+ return all_positions, all_positions_mask
+
+
+def _extract_template_features(
+ mmcif_object,
+ pdb_id,
+ mapping,
+ template_sequence,
+ query_sequence,
+ template_chain_id,
+ confidence_scores,
+ kalign_binary_path):
+ """Parses atom positions in the target structure and aligns with the query.
+
+ Atoms for each residue in the template structure are indexed to coincide
+ with their corresponding residue in the query sequence, according to the
+ alignment mapping provided.
+
+ Note that we only extract at most 500 templates because of HHSearch settings.
+
+ We set missing/invalid confidence scores to the default value of -1.
+ Note: We now have 4 types of confidence scores:
+ 1. Valid scores
+ 2. Invalid scores of residues not in both the query sequence and template
+ sequence
+ 3. Missing scores because we don't have the secondary structure, and HHAlign
+ doesn't produce the posterior probabilities in this case.
+ 4. Missing scores because of a different template sequence in PDB70,
+ invalidating the previously computed confidence scores. (Though in theory
+ HHAlign can be run on these to recompute the correct confidence scores).
+ We handle invalid and missing scores by setting them to -1, but consider
+ adding masks for the different types.
+
+ Args:
+ mmcif_object: mmcif_parsing.MmcifObject representing the template.
+ pdb_id: PDB code for the template.
+ mapping: Dictionary mapping indices in the query sequence to indices in
+ the template sequence.
+ template_sequence: String describing the amino acid sequence for the
+ template protein.
+ query_sequence: String describing the amino acid sequence for the query
+ protein.
+ template_chain_id: String ID describing which chain in the structure proto
+ should be used.
+ confidence_scores: String containing per-residue confidence scores, where
+ each character represents the *TRUNCATED* posterior probability that the
+ corresponding template residue is correctly aligned with the query
+ residue, given the database match is correct (0 corresponds approximately
+ to 0-10%, 9 to 90-100%).
+ kalign_binary_path: The path to a kalign executable used for template
+ realignment.
+
+ Returns:
+ A tuple with:
+ * A dictionary containing the extra features derived from the template
+ protein structure.
+ * A warning message if the hit was realigned to the actual mmCIF sequence.
+ Otherwise None.
+
+ Raises:
+ NoChainsError: If the mmcif object doesn't contain any chains.
+ SequenceNotInTemplateError: If the given chain id / sequence can't
+ be found in the mmcif object.
+ QueryToTemplateAlignError: If the actual template in the mmCIF file
+ can't be aligned to the query.
+ NoAtomDataInTemplateError: If the mmcif object doesn't contain
+ atom positions.
+ TemplateAtomMaskAllZerosError: If the mmcif object doesn't have any
+ unmasked residues.
+ """
+ if mmcif_object is None or not mmcif_object.chain_to_seqres:
+ raise NoChainsError('No chains in PDB: %s_%s' % (pdb_id, template_chain_id))
+
+ warning = None
+ try:
+ seqres, chain_id, mapping_offset = _find_template_in_pdb(
+ template_chain_id=template_chain_id,
+ template_sequence=template_sequence,
+ mmcif_object=mmcif_object)
+ except SequenceNotInTemplateError:
+ # If PDB70 contains a different version of the template, we use the sequence
+ # from the mmcif_object.
+ chain_id = template_chain_id
+ warning = (f'The exact sequence {template_sequence} was not found in '
+ f'{pdb_id}_{chain_id}. Realigning the template to the actual sequence.')
+ logging.warning(warning)
+ # This throws an exception if it fails to realign the hit.
+ seqres, mapping = _realign_pdb_template_to_query(
+ old_template_sequence=template_sequence,
+ template_chain_id=template_chain_id,
+ mmcif_object=mmcif_object,
+ old_mapping=mapping,
+ kalign_binary_path=kalign_binary_path)
+ logging.info(f'Sequence in {pdb_id}_{chain_id}: {template_sequence} successfully realigned to {seqres}')
+ # The template sequence changed.
+ template_sequence = seqres
+ # No mapping offset, the query is aligned to the actual sequence.
+ mapping_offset = 0
+ # Confidence scores were based on the previous sequence, so they are
+ # invalid
+ confidence_scores = None
+
+ try:
+ # Essentially set to infinity - we don't want to reject templates unless
+ # they're really really bad.
+ all_atom_positions, all_atom_mask = _get_atom_positions(mmcif_object, chain_id, max_ca_ca_distance=150.0)
+ except (CaDistanceError, KeyError) as ex:
+ raise NoAtomDataInTemplateError(f'Could not get atom data ({pdb_id}_{chain_id}): {str(ex)}')
+
+ all_atom_positions = np.split(all_atom_positions, all_atom_positions.shape[0])
+ all_atom_masks = np.split(all_atom_mask, all_atom_mask.shape[0])
+
+ output_templates_sequence = []
+ output_confidence_scores = []
+ templates_all_atom_positions = []
+ templates_all_atom_masks = []
+
+ for _ in query_sequence:
+ # Residues in the query_sequence that are not in the template_sequence:
+ templates_all_atom_positions.append(np.zeros((residue_constants.atom_type_num, 3)))
+ templates_all_atom_masks.append(np.zeros(residue_constants.atom_type_num))
+ output_templates_sequence.append('-')
+ output_confidence_scores.append(-1)
+
+ for k, v in mapping.items():
+ template_index = v + mapping_offset
+ templates_all_atom_positions[k] = all_atom_positions[template_index][0]
+ templates_all_atom_masks[k] = all_atom_masks[template_index][0]
+ output_templates_sequence[k] = template_sequence[v]
+ if confidence_scores and confidence_scores[v] != ' ':
+ output_confidence_scores[k] = int(confidence_scores[v])
+
+ # Alanine (AA with the lowest number of atoms) has 5 atoms (C, CA, CB, N,
+ # O).
+ if np.sum(templates_all_atom_masks) < 5:
+ raise TemplateAtomMaskAllZerosError('Template all atom mask was all zeros: %s_%s. Residue range: %d-%d' %
+ (pdb_id, chain_id, min(mapping.values()) + mapping_offset,
+ max(mapping.values()) + mapping_offset))
+
+ output_templates_sequence = ''.join(output_templates_sequence)
+
+ templates_aatype = residue_constants.sequence_to_onehot(
+ output_templates_sequence, residue_constants.HHBLITS_AA_TO_ID)
+
+ return (
+ {'template_all_atom_positions': np.array(templates_all_atom_positions),
+ 'template_all_atom_masks': np.array(templates_all_atom_masks),
+ 'template_sequence': output_templates_sequence.encode(),
+ 'template_aatype': np.array(templates_aatype),
+ 'template_confidence_scores': np.array(output_confidence_scores),
+ 'template_domain_names': f'{pdb_id.lower()}_{chain_id}'.encode(),
+ 'template_release_date': mmcif_object.header['release_date'].encode()},
+ warning)
+
+
+def _build_query_to_hit_index_mapping(
+ hit_query_sequence: str,
+ hit_sequence: str,
+ indices_hit: Sequence[int],
+ indices_query: Sequence[int],
+ original_query_sequence: str) -> Mapping[int, int]:
+ """Gets mapping from indices in original query sequence to indices in the hit.
+
+ hit_query_sequence and hit_sequence are two aligned sequences containing gap
+ characters. hit_query_sequence contains only the part of the original query
+ sequence that matched the hit. When interpreting the indices from the .hhr, we
+ need to correct for this to recover a mapping from original query sequence to
+ the hit sequence.
+
+ Args:
+ hit_query_sequence: The portion of the query sequence that is in the .hhr
+ hit
+ hit_sequence: The portion of the hit sequence that is in the .hhr
+ indices_hit: The indices for each aminoacid relative to the hit sequence
+ indices_query: The indices for each aminoacid relative to the original query
+ sequence
+ original_query_sequence: String describing the original query sequence.
+
+ Returns:
+ Dictionary with indices in the original query sequence as keys and indices
+ in the hit sequence as values.
+ """
+ # If the hit is empty (no aligned residues), return empty mapping
+ if not hit_query_sequence:
+ return {}
+
+ # Remove gaps and find the offset of hit.query relative to original query.
+ hhsearch_query_sequence = hit_query_sequence.replace('-', '')
+ hit_sequence = hit_sequence.replace('-', '')
+ hhsearch_query_offset = original_query_sequence.find(hhsearch_query_sequence)
+
+ # Index of -1 used for gap characters. Subtract the min index ignoring
+ # gaps.
+ min_idx = min(x for x in indices_hit if x > -1)
+ fixed_indices_hit = [x - min_idx if x > -1 else -1 for x in indices_hit]
+
+ min_idx = min(x for x in indices_query if x > -1)
+    fixed_indices_query = [x - min_idx if x > -1 else -1 for x in indices_query]
+
+ # Zip the corrected indices, ignore case where both seqs have gap
+ # characters.
+ mapping = {}
+ for q_i, q_t in zip(fixed_indices_query, fixed_indices_hit):
+ if q_t != -1 and q_i != -1:
+ if (q_t >= len(hit_sequence) or q_i + hhsearch_query_offset >= len(original_query_sequence)):
+ continue
+ mapping[q_i + hhsearch_query_offset] = q_t
+
+ return mapping
+
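+# Illustrative example (not part of the pipeline; the sequences below are made up):
+#   original_query_sequence = 'MKTAYIA'
+#   hit_query_sequence      = 'TAYI'          (aligned query part, no gaps)
+#   hit_sequence            = 'TA-I'          (hit with one gap column)
+#   indices_query           = [2, 3, 4, 5]
+#   indices_hit             = [0, 1, -1, 2]
+# _build_query_to_hit_index_mapping(...) then returns {2: 0, 3: 1, 5: 2}; query
+# residue 4 ('Y') is dropped because the hit has a gap in that column.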
+
+@dataclasses.dataclass(frozen=True)
+class SingleHitResult:
+ features: Optional[Mapping[str, Any]]
+ error: Optional[str]
+ warning: Optional[str]
+
+
+def _process_single_hit(
+ query_sequence,
+ query_pdb_code,
+ hit,
+ mmcif_dir,
+ max_template_date,
+ release_dates,
+ obsolete_pdbs,
+ kalign_binary_path,
+ strict_error_check):
+ """Tries to extract template features from a single HHSearch hit."""
+ # Fail hard if we can't get the PDB ID and chain name from the hit.
+ hit_pdb_code, hit_chain_id = _get_pdb_id_and_chain(hit)
+
+ if hit_pdb_code not in release_dates:
+ if hit_pdb_code in obsolete_pdbs:
+ hit_pdb_code = obsolete_pdbs[hit_pdb_code]
+
+ # Pass hit_pdb_code since it might have changed due to the pdb being
+ # obsolete.
+ try:
+ _assess_hhsearch_hit(
+ hit=hit,
+ hit_pdb_code=hit_pdb_code,
+ query_sequence=query_sequence,
+ query_pdb_code=query_pdb_code,
+ release_dates=release_dates,
+ release_date_cutoff=max_template_date)
+ except PrefilterError as e:
+ msg = f'hit {hit_pdb_code}_{hit_chain_id} did not pass prefilter: {str(e)}'
+ logging.info('%s: %s', query_pdb_code, msg)
+ if strict_error_check and isinstance(e, (DateError, PdbIdError, DuplicateError)):
+ # In strict mode we treat some prefilter cases as errors.
+ return SingleHitResult(features=None, error=msg, warning=None)
+
+ return SingleHitResult(features=None, error=None, warning=None)
+
+ mapping = _build_query_to_hit_index_mapping(
+ hit.query, hit.hit_sequence, hit.indices_hit, hit.indices_query, query_sequence)
+
+    # The mapping is from the query to the actual hit sequence, so we need to
+    # remove gaps (gaps have no confidence score anyway).
+ template_sequence = hit.hit_sequence.replace('-', '')
+ confidence_scores = ''.join([cs for t, cs in zip(hit.hit_sequence, hit.confidence_scores) if t != '-'])
+
+ cif_path = os.path.join(mmcif_dir, hit_pdb_code + '.cif')
+ if not os.path.exists(cif_path):
+ cif_path = os.path.join(mmcif_dir, hit_pdb_code.upper() + '.cif')
+ logging.info('Reading PDB entry from %s. Query: %s, template: %s', cif_path, query_sequence, template_sequence)
+ # Fail if we can't find the mmCIF file.
+ with open(cif_path, 'r') as cif_file:
+ cif_string = cif_file.read()
+
+ parsing_result = parse_mmcif(file_id=hit_pdb_code, mmcif_string=cif_string)
+
+ if parsing_result.mmcif_object is not None:
+ hit_release_date = datetime.datetime.strptime(parsing_result.mmcif_object.header['release_date'], '%Y-%m-%d')
+ if hit_release_date > max_template_date:
+ error = ('Template %s date (%s) > max template date (%s).' %
+ (hit_pdb_code, hit_release_date, max_template_date))
+ if strict_error_check:
+ return SingleHitResult(features=None, error=error, warning=None)
+ logging.warning(error)
+ return SingleHitResult(features=None, error=None, warning=None)
+
+ try:
+ features, realign_warning = _extract_template_features(
+ mmcif_object=parsing_result.mmcif_object,
+ pdb_id=hit_pdb_code,
+ mapping=mapping,
+ template_sequence=template_sequence,
+ query_sequence=query_sequence,
+ template_chain_id=hit_chain_id,
+ confidence_scores=confidence_scores,
+ kalign_binary_path=kalign_binary_path)
+ features['template_e_value'] = [hit.e_value]
+ features['template_sum_probs'] = [hit.sum_probs]
+ features['template_prob_true'] = [hit.prob_true]
+ features['template_score'] = [hit.score]
+ features['template_neff'] = [hit.neff]
+ features['template_similarity'] = [hit.similarity]
+
+ # It is possible there were some errors when parsing the other chains in the
+ # mmCIF file, but the template features for the chain we want were still
+ # computed. In such case the mmCIF parsing errors are not relevant.
+ return SingleHitResult(features=features, error=None, warning=realign_warning)
+ except (NoChainsError, NoAtomDataInTemplateError,
+ TemplateAtomMaskAllZerosError) as e:
+ # These 3 errors indicate missing mmCIF experimental data rather than a
+ # problem with the template search, so turn them into warnings.
+ warning = ('%s_%s (sum_probs: %.2f, rank: %d): feature extracting errors: '
+ '%s, mmCIF parsing errors: %s' % (hit_pdb_code,
+ hit_chain_id,
+ hit.sum_probs,
+ hit.index,
+ str(e),
+ parsing_result.errors))
+ if strict_error_check:
+ return SingleHitResult(features=None, error=warning, warning=None)
+ return SingleHitResult(features=None, error=None, warning=warning)
+ except Error as e:
+ error = ('%s_%s (sum_probs: %.2f, rank: %d): feature extracting errors: '
+ '%s, mmCIF parsing errors: %s' % (hit_pdb_code,
+ hit_chain_id,
+ hit.sum_probs,
+ hit.index,
+ str(e),
+ parsing_result.errors))
+ return SingleHitResult(features=None, error=error, warning=None)
+
+
+@dataclasses.dataclass(frozen=True)
+class TemplateSearchResult:
+ features: Mapping[str, Any]
+ errors: Sequence[str]
+ warnings: Sequence[str]
+
+
+class TemplateHitFeaturizer:
+ """A class for turning hhr hits to template features."""
+
+ def __init__(
+ self,
+ mmcif_dir: str,
+ max_template_date: str,
+ max_hits: int,
+ kalign_binary_path: str,
+ release_dates_path: Optional[str],
+ strict_error_check: bool = False):
+ """Initializes the Template Search.
+
+ Args:
+ mmcif_dir: Path to a directory with mmCIF structures. Once a template ID
+ is found by HHSearch, this directory is used to retrieve the template
+ data.
+ max_template_date: The maximum date permitted for template structures. No
+ template with date higher than this date will be returned. In ISO8601
+ date format, YYYY-MM-DD.
+ max_hits: The maximum number of templates that will be returned.
+ kalign_binary_path: The path to a kalign executable used for template
+ realignment.
+ release_dates_path: An optional path to a file with a mapping from PDB IDs
+ to their release dates. Thanks to this we don't have to redundantly
+ parse mmCIF files to get that information.
+ strict_error_check: If True, then the following will be treated as errors:
+ * If any template date is after the max_template_date.
+ * If any template has identical PDB ID to the query.
+ * If any template is a duplicate of the query.
+ * Any feature computation errors.
+ """
+ self._mmcif_dir = mmcif_dir
+ if not glob.glob(os.path.join(self._mmcif_dir, '*.cif')):
+ logging.error('Could not find CIFs in %s', self._mmcif_dir)
+ raise ValueError(f'Could not find CIFs in {self._mmcif_dir}')
+
+ try:
+ self._max_template_date = datetime.datetime.strptime(max_template_date, '%Y-%m-%d')
+ except ValueError:
+ raise ValueError('max_template_date must be set and have format YYYY-MM-DD.')
+ self._max_hits = max_hits
+ self._kalign_binary_path = kalign_binary_path
+ self._strict_error_check = strict_error_check
+
+ if release_dates_path:
+ logging.info('Using precomputed release dates %s.', release_dates_path)
+ self._release_dates = _parse_release_dates(release_dates_path)
+ else:
+ self._release_dates = {}
+
+ self._obsolete_pdbs = {}
+
+ def get_templates(
+ self,
+ query_sequence,
+ query_pdb_code,
+ query_release_date,
+ hhr_hits):
+ """Computes the templates for given query sequence (more details above)."""
+ logging.info('Searching for template for: %s', query_pdb_code)
+
+ template_features = {}
+ for template_feature_name in TEMPLATE_FEATURES:
+ template_features[template_feature_name] = []
+
+ # Always use a max_template_date. Set to query_release_date minus 60 days
+ # if that's earlier.
+ template_cutoff_date = self._max_template_date
+ if query_release_date:
+ delta = datetime.timedelta(days=60)
+ if query_release_date - delta < template_cutoff_date:
+ template_cutoff_date = query_release_date - delta
+ assert template_cutoff_date < query_release_date
+ assert template_cutoff_date <= self._max_template_date
+
+ num_hits = 0
+ errors = []
+ warnings = []
+
+ for hit in sorted(hhr_hits, key=lambda x: x.sum_probs, reverse=True):
+ # We got all the templates we wanted, stop processing HHSearch
+ # hits.
+ if num_hits >= self._max_hits:
+ break
+
+ result = _process_single_hit(
+ query_sequence=query_sequence,
+ query_pdb_code=query_pdb_code,
+ hit=hit,
+ mmcif_dir=self._mmcif_dir,
+ max_template_date=template_cutoff_date,
+ release_dates=self._release_dates,
+ obsolete_pdbs=self._obsolete_pdbs,
+ strict_error_check=self._strict_error_check,
+ kalign_binary_path=self._kalign_binary_path)
+
+ if result.error:
+ errors.append(result.error)
+
+ # There could be an error even if there are some results, e.g. thrown by
+ # other unparsable chains in the same mmCIF file.
+ if result.warning:
+ warnings.append(result.warning)
+
+ if result.features is None:
+ logging.info('Skipped invalid hit %s, error: %s, warning: %s', hit.name, result.error, result.warning)
+ else:
+ # Increment the hit counter, since we got features out of this
+ # hit.
+ num_hits += 1
+ for k in template_features:
+ template_features.get(k).append(result.features[k])
+
+ for name in template_features:
+ if num_hits > 0:
+ template_features[name] = np.stack(template_features.get(name),
+ axis=0).astype(TEMPLATE_FEATURES.get(name))
+ else:
+ # Make sure the feature has correct dtype even if empty.
+ template_features[name] = np.array([], dtype=TEMPLATE_FEATURES.get(name))
+
+ return TemplateSearchResult(features=template_features, errors=errors, warnings=warnings)
diff --git a/MindSPONGE/applications/research/FAAST/data/utils.py b/MindSPONGE/applications/research/FAAST/data/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a70e682276f0a0a2c43ac68d935a4794f3202c35
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/data/utils.py
@@ -0,0 +1,83 @@
+# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+utils module used for tmpdir generation.
+"""
+import time
+import contextlib
+import tempfile
+import shutil
+import pickle
+import os
+import numpy as np
+from absl import logging
+
+from .parsers import parse_fasta
+
+truncated_normal_stddev_factor = np.asarray(.87962566103423978, dtype=np.float32)
+
+
+@contextlib.contextmanager
+def tmpdir_manager(base_dir: str):
+    '''Create a temporary directory under base_dir and remove it when the context exits.'''
+ tmpdir = tempfile.mkdtemp(dir=base_dir)
+ try:
+ yield tmpdir
+ finally:
+ shutil.rmtree(tmpdir, ignore_errors=True)
+
+
+@contextlib.contextmanager
+def timing(msg: str):
+    '''Log the wall-clock time spent in the enclosed block.'''
+    logging.info('Started %s', msg)
+ tic = time.time()
+ yield
+ toc = time.time()
+ logging.info('Finished %s in %.3f seconds', msg, toc - tic)
+
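+# Example usage (illustrative only; run_search is a placeholder):
+#     with tmpdir_manager(base_dir='/tmp') as scratch_dir, timing('msa search'):
+#         run_search(scratch_dir)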
+
+def get_raw_feature(input_path, feature_generator, use_pkl, prot_name):
+    '''Get the raw feature of a protein, either from a pickle file or by a database search.'''
+    if use_pkl:
+        with open(input_path, "rb") as f:
+            data = pickle.load(f)
+        return data
+    return feature_generator.monomer_feature_generate(input_path, prot_name)
+
+
+def get_crop_size(input_path, use_pkl):
+    '''Get the crop size by comparing the lengths of all input sequences.'''
+ filenames = os.listdir(input_path)
+ max_length = 0
+ for filename in filenames:
+ file_full_path = os.path.join(input_path, filename)
+ if use_pkl:
+ with open(file_full_path, "rb") as f:
+ data = pickle.load(f)
+ current_crop_size = (data["msa"].shape[1] // 256 + 1) * 256
+ if data["msa"].shape[1] <= 128:
+ current_crop_size = 128
+ max_length = max(max_length, current_crop_size)
+ else:
+ with open(file_full_path, "r") as f:
+ input_fasta_str = f.read()
+ input_seqs, _ = parse_fasta(input_fasta_str)
+ current_crop_size = (len(input_seqs[0]) // 256 + 1) * 256
+ if len(input_seqs[0]) <= 128:
+ current_crop_size = 128
+ max_length = max(max_length, current_crop_size)
+
+ return max_length
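+# Note: the crop size is (length // 256 + 1) * 256 (e.g. a 300-residue sequence
+# gives 512); sequences of at most 128 residues use a crop size of 128 instead.
+# The returned value is the maximum over all inputs.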
diff --git a/MindSPONGE/applications/research/FAAST/extract_restraints.py b/MindSPONGE/applications/research/FAAST/extract_restraints.py
new file mode 100644
index 0000000000000000000000000000000000000000..99f1a5a3867f59b4ef7a21cb4432e1f1070e11e1
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/extract_restraints.py
@@ -0,0 +1,187 @@
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"extract_restraints"
+import argparse
+import os
+import stat
+import numpy as np
+from mindsponge.common import residue_constants
+from mindsponge.common.protein import from_pdb_string
+
+parser = argparse.ArgumentParser(description='extract_restraints.py')
+parser.add_argument('--pdb_path', type=str, help='Location of training pdb file.')
+parser.add_argument('--output_file', type=str, help='output file')
+
+arguments = parser.parse_args()
+
+
+def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
+ """Create pseudo beta features."""
+ is_gly = np.equal(aatype, residue_constants.restype_order['G'])
+ ca_idx = residue_constants.atom_order['CA']
+ cb_idx = residue_constants.atom_order['CB']
+ pseudo_beta = np.where(np.tile(is_gly[..., None].astype("int32"), \
+ [1,] * len(is_gly.shape) + [3,]).astype("bool"), \
+ all_atom_positions[..., ca_idx, :], \
+ all_atom_positions[..., cb_idx, :])
+ if all_atom_masks is not None:
+ pseudo_beta_mask = np.where(is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx])
+ pseudo_beta_mask = pseudo_beta_mask.astype(np.float32)
+ return pseudo_beta, pseudo_beta_mask
+ return pseudo_beta
+
+
+def preprocess_contact_info(gt_features):
+    '''Build the contact mask: both residues resolved and more than 6 positions apart in sequence.'''
+ remote_residue_threshold = 6
+
+ # contact_mask
+ pseudo_beta_mask = gt_features["pseudo_beta_mask"]
+ contact_mask = pseudo_beta_mask[:, None] * pseudo_beta_mask[None]
+
+ seq_len = pseudo_beta_mask.shape[0]
+ if seq_len > remote_residue_threshold + 1:
+ diagonal_mask = np.eye(seq_len)
+ for i in range(1, remote_residue_threshold + 1):
+ diagonal_mask += np.eye(seq_len, seq_len, i)
+ diagonal_mask += np.eye(seq_len, seq_len, -i)
+ diagonal_mask = diagonal_mask < 0.5
+ contact_mask *= diagonal_mask
+ else:
+ contact_mask *= 0
+
+ gt_features["contact_mask"] = contact_mask
+
+ return gt_features
+
+
+def generate_gaussian_filter(kernel_size, sigma=1, muu=0):
+ '''generate_gaussian_filter'''
+ # Initializing value of x,y as grid of kernel size
+ # in the range of kernel size
+ x, y = np.meshgrid(np.linspace(-1, 1, kernel_size),
+ np.linspace(-1, 1, kernel_size))
+ dst = np.sqrt(x ** 2 + y ** 2)
+
+ # lower normal part of gaussian
+ normal = 1 / (2.0 * np.pi * sigma ** 2)
+
+ # Calculating Gaussian filter
+ gauss = np.exp(-((dst - muu) ** 2 / (2.0 * sigma ** 2))) * normal
+ gauss /= np.max(gauss)
+ return gauss
+
+
+def smoothing(x, margin_size=2):
+ '''smoothing'''
+ kernel_size = 2 * margin_size + 1
+ x_pad = np.pad(x, ((margin_size, margin_size), (margin_size, margin_size)), constant_values=0)
+ gaussian_filter = generate_gaussian_filter(kernel_size, sigma=1, muu=0)
+ index = np.where(x_pad > 0)
+ for i, j in np.array(index).transpose():
+ x_pad[i - margin_size:i + margin_size + 1, j - margin_size:j + margin_size + 1] = \
+ np.maximum(gaussian_filter, x_pad[i - margin_size:i + margin_size + 1, \
+ j - margin_size:j + margin_size + 1])
+ return x_pad[margin_size: - margin_size, margin_size: -margin_size]
+
+
+def generate_contact_info(gt_features):
+ '''generate_contact_info'''
+ true_pseudo_beta = gt_features["pseudo_beta"]
+ sequence_length = true_pseudo_beta.shape[0]
+ contact_mask_input = np.zeros((sequence_length, sequence_length)).astype(np.float32)
+
+ np.random.seed(0)
+ try:
+ constraints_num = 200
+ print("num", constraints_num)
+
+ good_constraints_num_ratio = 1.0
+
+ constraints_num1 = int(constraints_num * good_constraints_num_ratio)
+
+ contact_mask = gt_features["contact_mask"]
+
+ true_cb_distance = np.sqrt((np.square(true_pseudo_beta[None] - true_pseudo_beta[:, None])).sum(-1) + 1e-8)
+
+ # positive sample
+ probs = (1.0 - 1 / (1 + np.exp(-400000.0 * (true_cb_distance - 8))))
+ randoms = np.random.random(probs.shape)
+ selected_index = np.where((probs > randoms) * contact_mask > 0.5)
+
+ selected_index = np.array(selected_index).transpose()
+ np.random.shuffle(selected_index)
+ final_selected_index = selected_index[:constraints_num1]
+ result = ""
+ for str1 in final_selected_index:
+ temp = f"{str1[0]} {str1[1]}"
+ result += temp
+ result += "\n"
+
+ os_flags = os.O_RDWR | os.O_CREAT
+ os_modes = stat.S_IRWXU
+ with os.fdopen(os.open(arguments.output_file, os_flags, os_modes), 'w', encoding='utf-8') as fout:
+ fout.write(result)
+ real_good_constraints_num = final_selected_index.shape[0]
+
+ for i in range(final_selected_index.shape[0]):
+ contact_mask_input[final_selected_index[i][0], final_selected_index[i][1]] = 1
+
+ bad_constraints_num = int(
+ real_good_constraints_num * (1 - good_constraints_num_ratio) / good_constraints_num_ratio)
+ probs = 1 / (1 + np.exp(-400000.0 * (true_cb_distance - 12)))
+ randoms = np.random.random(probs.shape)
+ selected_index = np.where((probs > randoms) * contact_mask > 0.5)
+
+ selected_index = np.array(selected_index).transpose()
+ np.random.shuffle(selected_index)
+ final_selected_index = selected_index[:bad_constraints_num]
+
+ print("constraints_num, good_constraints_num_ratio, real_good_constraints_num, bad_constraints_num",
+ constraints_num, good_constraints_num_ratio, real_good_constraints_num, bad_constraints_num)
+
+        for i in range(final_selected_index.shape[0]):
+            contact_mask_input[final_selected_index[i][0], final_selected_index[i][1]] = 1
+
+ except Exception as e:
+ print("error while generating contact info", e)
+
+ np.random.seed()
+
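+# Note: because the sigmoids above use a factor of 4e5, the sampling
+# probabilities in generate_contact_info behave as hard cutoffs: among pairs
+# allowed by contact_mask, those with true CB distance below ~8 angstroms are
+# drawn as positive restraints and those beyond ~12 angstroms as decoys; with
+# good_constraints_num_ratio = 1.0 the decoy count is 0, so no decoys are added.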
+
+def select_contacts(pdb_file_path):
+ '''select_contacts'''
+ with open(pdb_file_path, 'r') as f:
+ prot_pdb = from_pdb_string(f.read())
+ aatype = prot_pdb.aatype
+ atom37_positions = prot_pdb.atom_positions.astype(np.float32)
+ atom37_mask = prot_pdb.atom_mask.astype(np.float32)
+
+ # get pseudo_beta, pseudo_beta_mask
+ pseudo_beta, pseudo_beta_mask = pseudo_beta_fn(aatype, atom37_positions, atom37_mask)
+
+ # combine all gt features
+ gt_features = {'pseudo_beta': pseudo_beta, 'pseudo_beta_mask': pseudo_beta_mask}
+
+ gt_features = preprocess_contact_info(gt_features)
+
+ generate_contact_info(gt_features)
+
+
+if __name__ == "__main__":
+ select_contacts(arguments.pdb_path)
diff --git a/MindSPONGE/applications/research/FAAST/install.sh b/MindSPONGE/applications/research/FAAST/install.sh
new file mode 100644
index 0000000000000000000000000000000000000000..da18c00415e2cc8a7751b1c1a6c57993b479e313
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/install.sh
@@ -0,0 +1,26 @@
+#!/bin/bash -e
+
+git clone https://github.com/soedinglab/hh-suite.git
+mkdir -p hh-suite/build && cd hh-suite/build
+cmake -DCMAKE_INSTALL_PREFIX=. ..
+make -j 4 && make install
+cd ../../
+git clone https://github.com/TimoLassmann/kalign.git
+cd kalign
+mkdir build
+cd build
+cmake ..
+sed -i "2aset(CMAKE_INSTALL_PREFIX ../../)" cmake_install.cmake
+make
+make test
+make install
+
+pip install pandas
+pip install pynvml
+pip install decorator
+pip install tqdm
+pip install scikit-learn
+pip install pyparsing
+pip uninstall --yes urllib3 && pip install urllib3==1.26.14
+conda install --yes openmm
+conda install --yes -c conda-forge pdbfixer
diff --git a/MindSPONGE/applications/research/FAAST/main.py b/MindSPONGE/applications/research/FAAST/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e60bdab9780f213429712049d146c877ae6b138
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/main.py
@@ -0,0 +1,331 @@
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""eval script"""
+import argparse
+import os
+import ast
+import stat
+import pickle
+import pynvml
+import numpy as np
+
+import mindspore.context as context
+import mindspore.common.dtype as mstype
+from mindspore import Tensor, nn, load_checkpoint
+from mindsponge.cell.amp import amp_convert
+from mindsponge.common.config_load import load_config
+from mindsponge.common.protein import to_pdb, from_prediction
+from mindsponge.common import residue_constants
+from commons.analysis import predur_vs_predpdb, filter_ur_with_pdb
+from run_relax import run_relax
+# NOTE: mk_hhsearch_db is needed when --use_custom is set; it is assumed to be exported by the data package.
+from data import Feature, RawFeatureGenerator, get_crop_size, get_raw_feature, mk_hhsearch_db
+from model import MegaFold, compute_confidence
+from assign_settings import assign_all_settings
+from assign.assign import assign_iteration
+from assign.init_assign import init_assign_call
+
+parser = argparse.ArgumentParser(description='Inputs for eval.py')
+parser.add_argument('--data_config', default="./config/data.yaml", help='data process config')
+parser.add_argument('--model_config', default="./config/model.yaml", help='model config')
+parser.add_argument('--use_custom', type=ast.literal_eval, default=False, help='whether use custom')
+parser.add_argument('--input_path', help='processed raw feature path')
+parser.add_argument('--pdb_path', type=str, help='Location of training pdb file.')
+parser.add_argument('--peaklist_path', type=str, default="./pdb_peaklist", help='peaklist_path.')
+parser.add_argument('--use_pkl', type=ast.literal_eval, default=False,
+ help="use pkl as input or fasta file as input, in default use fasta")
+parser.add_argument('--use_template', type=ast.literal_eval, default=False,
+ help="use_template or not, in default use template")
+parser.add_argument('--checkpoint_file', help='checkpoint path')
+parser.add_argument('--device_id', default=0, type=int, help='DEVICE_ID')
+parser.add_argument('--a3m_path', type=str, help='a3m_path')
+parser.add_argument('--template_path', type=str, help='template_path')
+parser.add_argument('--output_path', type=str, help='final result path')
+parser.add_argument('--run_platform', default='Ascend', type=str, help='which platform to use, Ascend or GPU')
+
+arguments = parser.parse_args()
+
+
+def init_assign_with_pdb(prot_path, ur_path, ur_tuple_path, ref_pdb=None):
+ '''init_assign_with_pdb'''
+ print("\nInitial assignment:")
+ prot_name = prot_path.split("/")[-1]
+ ur_list, ur_list_tuple = init_assign_call(prot_path=prot_path)
+ if ref_pdb:
+ print(f"Filtering restraint with given structure.")
+ ur_list_tuple = filter_ur_with_pdb(ur_list, ref_pdb)
+ os_flags = os.O_RDWR | os.O_CREAT
+ os_modes = stat.S_IRWXU
+ with os.fdopen(os.open(ur_path + "/" + prot_name + ".pkl", os_flags, os_modes), "wb") as fout:
+ pickle.dump(ur_list, fout)
+ with os.fdopen(os.open(ur_tuple_path + "/" + prot_name + ".pkl", os_flags, os_modes), "wb") as fout:
+ pickle.dump(ur_list_tuple, fout)
+
+
+def restraint_evaluation(final_atom_positions, aatype, restraint_mask_input):
+ '''restraint_evaluation'''
+ if restraint_mask_input.sum() < 1:
+ return 1.0
+ restraint_mask_input = restraint_mask_input.astype(np.float32)
+ pseudo_beta_pred = pseudo_beta_fn(aatype, final_atom_positions, None) # CA as CB for glycine
+ cb_distance_pred = np.sqrt((np.square(pseudo_beta_pred[None] - pseudo_beta_pred[:, None])).sum(-1) + 1e-8)
+ has_restraint_pred = (cb_distance_pred <= 10).astype(np.float32) # 8.0 or 10.0
+
+ restraint_pred_rate_input = ((has_restraint_pred == restraint_mask_input) * \
+ restraint_mask_input).sum() / (restraint_mask_input.sum() + 1e-8)
+
+ return round(restraint_pred_rate_input, 4)
+
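+# Note: restraint_evaluation returns the recall of the input restraints, i.e. the
+# fraction of restrained pairs whose predicted pseudo-beta (CB, or CA for glycine)
+# distance is within 10 angstroms; it returns 1.0 when no restraints are given.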
+
+def analysis(predur_path, predpdb_path, filter_names, iter_idx):
+    '''Compare predicted restraints against predicted structures and print summary statistics.'''
+ output_predur_vs_predpdb, confs = predur_vs_predpdb(predur_path=predur_path,
+ predpdb_path=predpdb_path,
+ filter_names=filter_names,
+ return_conf=True)
+ if len(output_predur_vs_predpdb.shape) == 2:
+ output_predur_vs_predpdb = output_predur_vs_predpdb[:, [0, 6, 12, 5, 11]]
+ else:
+ output_predur_vs_predpdb = output_predur_vs_predpdb[[0, 6, 12, 5, 11]]
+
+ outputs_all = output_predur_vs_predpdb
+
+ keys = ["protein name", "restraints number per residue",
+ "long restraints number per residue",
+ "restraints structure coincidence rate",
+ "long restraints structure coincidence rate"]
+ print(f"Iteration {iter_idx}:")
+
+ for outputs in outputs_all:
+ for key, output in zip(keys, outputs):
+ print(key, ": ", output)
+ print()
+
+ return confs
+
+
+def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
+ """Create pseudo beta features."""
+ is_gly = np.equal(aatype, residue_constants.restype_order['G'])
+ ca_idx = residue_constants.atom_order['CA']
+ cb_idx = residue_constants.atom_order['CB']
+ pseudo_beta = np.where(np.tile(is_gly[..., None].astype("int32"), \
+ [1,] * len(is_gly.shape) + [3,]).astype("bool"), \
+ all_atom_positions[..., ca_idx, :], \
+ all_atom_positions[..., cb_idx, :])
+ if all_atom_masks is not None:
+ pseudo_beta_mask = np.where(is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx])
+ pseudo_beta_mask = pseudo_beta_mask.astype(np.float32)
+ return pseudo_beta, pseudo_beta_mask
+ return pseudo_beta
+
+
+def make_restraint_info(ori_seq_len, ur_path, distance_threshold=1, sample_ur_rate=0.1):
+    '''Build a symmetric 0/1 restraint mask from a pickled list of 1-based residue-index pairs.
+
+    Only pairs separated by at least distance_threshold residues are kept, and a
+    sample_ur_rate fraction of them is randomly sampled.
+    '''
+ num_residues = ori_seq_len
+ restraint_info_mask = np.zeros((num_residues, num_residues))
+ if not ur_path:
+ return restraint_info_mask
+
+ with open(ur_path, "rb") as f:
+ useful_urs = pickle.load(f)
+
+ useful_urs = [[i, j] for i, j in useful_urs if abs(i - j) >= distance_threshold]
+ ur_num = int(len(useful_urs) * sample_ur_rate)
+ np.random.shuffle(useful_urs)
+ useful_urs = useful_urs[:ur_num]
+
+ for i, j in useful_urs:
+ restraint_info_mask[int(i) - 1, int(j) - 1] = 1
+ restraint_info_mask = (restraint_info_mask + restraint_info_mask.T) > 0
+ restraint_info_mask = restraint_info_mask.astype(np.float32)
+
+ return restraint_info_mask
+
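+# Illustrative example (hypothetical restraint file): for a sequence of length 50,
+# a pickle containing [(3, 40), (12, 20)] with sample_ur_rate=1.0 gives a
+# symmetric mask with ones at (2, 39)/(39, 2) and (11, 19)/(19, 11), since the
+# stored residue indices are 1-based.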
+
+def eval_main(prot_names, megafold, model_cfg, data_cfg, feature_generator):
+    '''Run iterative restraint assignment and structure prediction for each protein.'''
+ peaklist_path = arguments.peaklist_path
+ for prot_file in prot_names:
+ res_path = "./megaassign/"
+ res_path_all = "./oriassign/"
+ os.makedirs(res_path_all, exist_ok=True)
+
+ for iter_idx in range(len(assign_all_settings)):
+ all_settings = assign_all_settings.get(iter_idx)
+ print(f"Settings for iteration {iter_idx}")
+ print(all_settings, flush=True)
+ sample_ur_rate = all_settings.get("infer_pdb")["sample_ur_rate"]
+
+ local_res_path = os.path.join(res_path, f"iter_{iter_idx}")
+ local_ur_tuple_path = os.path.join(local_res_path, "ur_tuple")
+
+ next_res_path = os.path.join(res_path, "iter_" + str(iter_idx + 1))
+ next_ur_path = os.path.join(next_res_path, "ur")
+ next_ur_tuple_path = os.path.join(next_res_path, "ur_tuple")
+
+ local_unrelaxed_pdb_path = os.path.join(local_res_path, "structure")
+ local_relaxed_pdb_path = os.path.join(local_res_path, "structure_relaxed")
+ for path in [local_res_path, local_ur_tuple_path, local_unrelaxed_pdb_path, local_relaxed_pdb_path,
+ next_ur_path, next_ur_tuple_path, next_res_path]:
+ os.makedirs(path, exist_ok=True)
+ print("local_res_path ", local_res_path, flush=True)
+ os.makedirs(local_res_path, exist_ok=True)
+
+ for repeat_idx in range(all_settings["infer_pdb"]["num_repeats"]):
+ prot_name = prot_file.split('.')[0]
+ if all_settings["init_assign"]:
+ ur_file_path = None
+ else:
+ ur_file_path = os.path.join(local_ur_tuple_path, f"{prot_name}.pkl")
+ raw_feature = get_raw_feature(os.path.join(arguments.input_path, prot_file), feature_generator,
+ arguments.use_pkl, prot_name)
+ ori_res_length = raw_feature['msa'].shape[1]
+ restraint_info_mask_new = make_restraint_info(model_cfg.seq_length, ur_file_path,
+ sample_ur_rate=sample_ur_rate)
+ restraint_info_mask_new = Tensor(restraint_info_mask_new, mstype.float32)
+ processed_feature = Feature(data_cfg, raw_feature)
+ feat, prev_pos, prev_msa_first_row, prev_pair = processed_feature.pipeline(data_cfg, \
+ mixed_precision=arguments.mixed_precision)
+ prev_pos = Tensor(prev_pos)
+ prev_msa_first_row = Tensor(prev_msa_first_row)
+ prev_pair = Tensor(prev_pair)
+
+ for i in range(4):
+ feat_i = [Tensor(x[i]) for x in feat]
+ result = megafold(*feat_i,
+ prev_pos,
+ prev_msa_first_row,
+ prev_pair,
+ restraint_info_mask_new)
+ prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits = result
+
+ eval_res = restraint_evaluation(prev_pos.asnumpy()[:ori_res_length],
+ feat[4][0][:ori_res_length],
+ restraint_info_mask_new[:ori_res_length, :ori_res_length].asnumpy())
+ restraint_pred_rate_input = eval_res
+
+ final_atom_positions = prev_pos.asnumpy()[:ori_res_length]
+ final_atom_mask = feat[16][0][:ori_res_length]
+ predicted_lddt_logits = predicted_lddt_logits.asnumpy()[:ori_res_length]
+ confidence, plddt = compute_confidence(predicted_lddt_logits, return_lddt=True)
+
+ b_factors = plddt[:, None] * final_atom_mask
+
+ unrelaxed_protein = from_prediction(final_atom_positions,
+ final_atom_mask,
+ feat[4][0][:ori_res_length],
+ feat[17][0][:ori_res_length],
+ b_factors)
+ pdb_file = to_pdb(unrelaxed_protein)
+
+ unrelaxed_pdb_file_path = os.path.join(local_unrelaxed_pdb_path, f'{prot_name}_{repeat_idx}.pdb')
+ relaxed_pdb_file_path = os.path.join(local_relaxed_pdb_path, f'{prot_name}_{repeat_idx}.pdb')
+ os_flags = os.O_RDWR | os.O_CREAT
+ os_modes = stat.S_IRWXU
+ with os.fdopen(os.open(unrelaxed_pdb_file_path, os_flags, os_modes), 'w') as fout:
+ fout.write(pdb_file)
+ print(f">>>>>>>>>>>>>>>>>>>>>>Protein name: {prot_name}, iteration: {iter_idx}, "
+ f"repeat: {repeat_idx}, number of input restraint pair: "
+ f"{int(restraint_info_mask_new.asnumpy().sum())}, confidence: {round(confidence, 2)}, "
+ f"input restraint recall: {restraint_pred_rate_input}.", flush=True)
+ run_relax(unrelaxed_pdb_file_path, relaxed_pdb_file_path)
+
+ names = os.listdir(arguments.input_path)
+ names = [name.split(".")[0] for name in names if name.split(".")[-1] == "pkl"]
+ names.sort()
+ if all_settings["init_assign"]:
+ prot_path = os.path.join(peaklist_path, prot_name)
+ init_assign_with_pdb(prot_path, next_ur_path, next_ur_tuple_path, ref_pdb=relaxed_pdb_file_path)
+ else:
+ assign_iteration(next_ur_tuple_path,
+ next_ur_path,
+ local_relaxed_pdb_path,
+ peaklist_path,
+ all_settings,
+ filter_names=names)
+ _ = analysis(predur_path=next_ur_path,
+ predpdb_path=local_relaxed_pdb_path,
+ filter_names=names,
+ iter_idx=iter_idx)
+
+ save_res_local = f"{res_path_all}/{prot_name}"
+ os.system(f"mv {res_path} {save_res_local}")
+
+
+def fold_infer(args):
+    '''FAAST inference: load the configs, build the MegaFold model, and run the evaluation loop.'''
+ data_cfg = load_config(args.data_config)
+ model_cfg = load_config(args.model_config)
+ data_cfg.eval.crop_size = get_crop_size(args.input_path, args.use_pkl)
+ model_cfg.seq_length = data_cfg.eval.crop_size
+ if args.run_platform == "GPU":
+ pynvml.nvmlInit()
+ pynvml.nvmlSystemGetDriverVersion()
+ handle = pynvml.nvmlDeviceGetHandleByIndex(0)
+ info = pynvml.nvmlDeviceGetMemoryInfo(handle)
+ total = info.total / 1024 / 1024 / 1024
+ if total <= 25:
+ model_cfg.slice = model_cfg.slice_new
+ slice_key = "seq_" + str(model_cfg.seq_length)
+ slice_val = vars(model_cfg.slice)[slice_key]
+ model_cfg.slice = slice_val
+
+ megafold = MegaFold(model_cfg, mixed_precision=args.mixed_precision)
+
+ if args.mixed_precision:
+ fp32_white_list = (nn.Softmax, nn.LayerNorm)
+ amp_convert(megafold, fp32_white_list)
+ else:
+ megafold.to_float(mstype.float32)
+
+ temp_names = os.listdir(args.input_path)
+ prot_names = []
+ if not args.use_pkl:
+ os.makedirs(args.a3m_path, exist_ok=True)
+ os.makedirs(args.template_path, exist_ok=True)
+ if args.use_custom:
+ mk_hhsearch_db(args.template_path)
+ feature_generator = RawFeatureGenerator(data_cfg.database_search, args.a3m_path, args.template_path,
+ args.use_custom, args.use_template)
+ for key in temp_names:
+ if "fas" in key:
+ prot_names.append(key)
+ else:
+ feature_generator = None
+ for key in temp_names:
+ if "pkl" in key:
+ prot_names.append(key)
+
+ load_checkpoint(args.checkpoint_file, megafold)
+
+ eval_main(prot_names, megafold, model_cfg, data_cfg, feature_generator)
+
+
+if __name__ == "__main__":
+ if arguments.run_platform == 'Ascend':
+ context.set_context(mode=context.GRAPH_MODE,
+ memory_optimize_level="O1",
+ device_target="Ascend",
+ max_call_depth=6000,
+ device_id=arguments.device_id)
+ arguments.mixed_precision = 1
+ elif arguments.run_platform == 'GPU':
+ context.set_context(mode=context.GRAPH_MODE,
+ memory_optimize_level="O1",
+ device_target="GPU",
+ max_call_depth=6000,
+ device_id=arguments.device_id,)
+ arguments.mixed_precision = 0
+ fold_infer(arguments)
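+# Example launch (illustrative; all paths are placeholders):
+#     python main.py --run_platform Ascend --use_pkl True \
+#         --input_path ./pkl_inputs --checkpoint_file ./FAAST.ckpt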
diff --git a/MindSPONGE/applications/research/FAAST/model/__init__.py b/MindSPONGE/applications/research/FAAST/model/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..86ba22bf8f33d00fe0890641bd99a413010a925d
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/model/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+'''init'''
+from .fold import MegaFold, compute_confidence
diff --git a/MindSPONGE/applications/research/FAAST/model/fold.py b/MindSPONGE/applications/research/FAAST/model/fold.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee7b4e68600180db02a6be94a270a941ef5ff85c
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/model/fold.py
@@ -0,0 +1,323 @@
+# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""model"""
+import numpy as np
+import mindspore.common.dtype as mstype
+import mindspore.nn as nn
+import mindspore.numpy as mnp
+from mindspore.ops import operations as P
+import mindspore.ops as ops
+from mindspore.ops import functional as F
+from mindspore.common.tensor import Tensor
+from mindspore import Parameter
+import mindsponge.common.residue_constants as residue_constants
+from mindsponge.common.utils import dgram_from_positions, pseudo_beta_fn, atom37_to_torsion_angles
+from mindsponge.data.data_transform import get_chi_atom_pos_indices
+from mindsponge.cell.initializer import lecun_init
+from module.template_embedding import TemplateEmbedding
+from module.evoformer import Evoformer, EvoformerIterationContact
+from module.structure import StructureModule
+from module.head import DistogramHead, ExperimentallyResolvedHead, MaskedMsaHead, \
+ PredictedLDDTHead, PredictedAlignedErrorHead
+from scipy.special import softmax
+
+
+def caculate_constant_array(seq_length):
+    '''Precompute constant index arrays used by atom37_to_torsion_angles.'''
+ chi_atom_indices = np.array(get_chi_atom_pos_indices()).astype(np.int32)
+ chi_angles_mask = list(residue_constants.chi_angles_mask)
+ chi_angles_mask.append([0.0, 0.0, 0.0, 0.0])
+ chi_angles_mask = np.array(chi_angles_mask).astype(np.float32)
+ mirror_psi_mask = np.float32(np.asarray([1., 1., -1., 1., 1., 1., 1.])[None, None, :, None])
+ chi_pi_periodic = np.float32(np.array(residue_constants.chi_pi_periodic))
+
+ indices0 = np.arange(4).reshape((-1, 1, 1, 1, 1)).astype("int32") # 4 batch
+ indices0 = indices0.repeat(seq_length, axis=1) # seq_length sequence length
+ indices0 = indices0.repeat(4, axis=2) # 4 chis
+ indices0 = indices0.repeat(4, axis=3) # 4 atoms
+
+ indices1 = np.arange(seq_length).reshape((1, -1, 1, 1, 1)).astype("int32")
+ indices1 = indices1.repeat(4, axis=0)
+ indices1 = indices1.repeat(4, axis=2)
+ indices1 = indices1.repeat(4, axis=3)
+
+ constant_array = [chi_atom_indices, chi_angles_mask, mirror_psi_mask, chi_pi_periodic, indices0, indices1]
+ constant_array = [Tensor(val) for val in constant_array]
+ return constant_array
+
+
+def compute_confidence(predicted_lddt_logits, return_lddt=False):
+ """compute confidence"""
+
+ num_bins = predicted_lddt_logits.shape[-1]
+ bin_width = 1 / num_bins
+ start_n = bin_width / 2
+ plddt = compute_plddt(predicted_lddt_logits, start_n, bin_width)
+ confidence = np.mean(plddt)
+ if return_lddt:
+ return confidence, plddt
+
+ return confidence
+
+
+def compute_plddt(logits, start_n, bin_width):
+ """Computes per-residue pLDDT from logits.
+
+ Args:
+ logits: [num_res, num_bins] output from the PredictedLDDTHead.
+
+ Returns:
+ plddt: [num_res] per-residue pLDDT.
+ """
+ bin_centers = np.arange(start=start_n, stop=1.0, step=bin_width)
+ probs = softmax(logits, axis=-1)
+ predicted_lddt_ca = np.sum(probs * bin_centers[None, :], axis=-1)
+ return predicted_lddt_ca * 100
+
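+# For example, with 50 bins the centers are 0.01, 0.03, ..., 0.99, so the
+# returned per-residue pLDDT is an expected value on a 0-100 scale.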
+
+class MegaFold(nn.Cell):
+ """MegaFold"""
+
+ def __init__(self, config, mixed_precision):
+ super(MegaFold, self).__init__()
+
+ self.cfg = config
+
+ if mixed_precision:
+ self._type = mstype.float16
+ else:
+ self._type = mstype.float32
+ self.is_training = self.cfg.is_training
+ self.recycle_pos = self.cfg.recycle_pos
+ self.recycle_features = self.cfg.recycle_features
+ self.max_relative_feature = self.cfg.max_relative_feature
+ self.num_bins = self.cfg.prev_pos.num_bins
+ self.min_bin = self.cfg.prev_pos.min_bin
+ self.max_bin = self.cfg.prev_pos.max_bin
+ self.template_enabled = self.cfg.template.enabled
+ self.template_embed_torsion_angles = self.cfg.template.embed_torsion_angles
+ self.extra_msa_stack_num = self.cfg.evoformer.extra_msa_stack_num
+ self.msa_stack_num = self.cfg.evoformer.msa_stack_num
+ self.chi_atom_indices, self.chi_angles_mask, self.mirror_psi_mask, self.chi_pi_periodic, \
+ self.indices0, self.indices1 = caculate_constant_array(self.cfg.seq_length)
+
+ self.contact_one_hot = nn.OneHot(depth=2, axis=-1)
+ self.preprocess_contact = nn.Dense(2, 32).to_float(mstype.float16)
+
+ self.preprocess_1d = nn.Dense(self.cfg.common.target_feat_dim, self.cfg.msa_channel,
+ weight_init=lecun_init(self.cfg.common.target_feat_dim))
+ self.preprocess_msa = nn.Dense(self.cfg.common.msa_feat_dim, self.cfg.msa_channel,
+ weight_init=lecun_init(self.cfg.common.msa_feat_dim))
+ self.left_single = nn.Dense(self.cfg.common.target_feat_dim, self.cfg.pair_channel,
+ weight_init=lecun_init(self.cfg.common.target_feat_dim))
+ self.right_single = nn.Dense(self.cfg.common.target_feat_dim, self.cfg.pair_channel,
+ weight_init=lecun_init(self.cfg.common.target_feat_dim))
+ self.prev_pos_linear = nn.Dense(self.cfg.common.dgram_dim, self.cfg.pair_channel,
+ weight_init=lecun_init(self.cfg.common.dgram_dim))
+ self.pair_activations = nn.Dense(self.cfg.common.pair_in_dim, self.cfg.pair_channel,
+ weight_init=lecun_init(self.cfg.common.pair_in_dim))
+ self.extra_msa_one_hot = nn.OneHot(depth=23, axis=-1)
+ self.template_aatype_one_hot = nn.OneHot(depth=22, axis=-1)
+ self.prev_msa_first_row_norm = nn.LayerNorm([256], epsilon=1e-5)
+ self.prev_pair_norm = nn.LayerNorm([128], epsilon=1e-5)
+ self.one_hot = nn.OneHot(depth=self.cfg.max_relative_feature * 2 + 1, axis=-1)
+ self.extra_msa_activations = nn.Dense(25, self.cfg.extra_msa_channel, weight_init=lecun_init(25))
+ self.template_embedding = TemplateEmbedding(self.cfg, mixed_precision)
+
+ self.matmul_trans_b = P.MatMul(transpose_b=True)
+ self.batch_matmul_trans_b = P.BatchMatMul(transpose_b=True)
+ self.template_single_embedding = nn.Dense(57, self.cfg.msa_channel,
+ weight_init=
+ lecun_init(57, initializer_name='relu'))
+ self.template_projection = nn.Dense(self.cfg.msa_channel, self.cfg.msa_channel,
+ weight_init=lecun_init(self.cfg.msa_channel,
+ initializer_name='relu'))
+ self.relu = nn.ReLU()
+ self.single_activations = nn.Dense(self.cfg.msa_channel, self.cfg.seq_channel,
+ weight_init=lecun_init(self.cfg.msa_channel))
+ extra_msa_stack = nn.CellList()
+ for _ in range(self.extra_msa_stack_num):
+ extra_msa_block = Evoformer(self.cfg,
+ msa_act_dim=64,
+ pair_act_dim=128,
+ is_extra_msa=True,
+ batch_size=None)
+ extra_msa_stack.append(extra_msa_block)
+ self.extra_msa_stack = extra_msa_stack
+ if self.is_training:
+ msa_stack = nn.CellList()
+ for _ in range(self.msa_stack_num):
+ msa_block = Evoformer(self.cfg,
+ msa_act_dim=256,
+ pair_act_dim=128,
+ is_extra_msa=False,
+ batch_size=None)
+ msa_stack.append(msa_block)
+ self.msa_stack = msa_stack
+ self.module_distogram = DistogramHead(self.cfg.heads.distogram,
+ self.cfg.pair_channel)
+ self.module_exp_resolved = ExperimentallyResolvedHead(self.cfg.seq_channel)
+ self.module_mask = MaskedMsaHead(self.cfg.heads.masked_msa,
+ self.cfg.msa_channel)
+ self.aligned_error = PredictedAlignedErrorHead(self.cfg.heads.predicted_aligned_error,
+ self.cfg.pair_channel)
+ else:
+ self.msa_stack = EvoformerIterationContact(self.cfg,
+ msa_act_dim=256,
+ pair_act_dim=128,
+ is_extra_msa=False,
+ batch_size=self.msa_stack_num)
+
+ self.idx_evoformer_block = Parameter(Tensor(0, mstype.int32), requires_grad=False)
+ self.evoformer_num_block_eval = Tensor(self.msa_stack_num, mstype.int32)
+
+ self.structure_module = StructureModule(self.cfg,
+ self.cfg.seq_channel,
+ self.cfg.pair_channel)
+
+ self.module_lddt = PredictedLDDTHead(self.cfg.heads.predicted_lddt,
+ self.cfg.seq_channel)
+ self.print = ops.Print()
+
+ def construct(self, target_feat, msa_feat, msa_mask, seq_mask, aatype,
+ template_aatype, template_all_atom_masks, template_all_atom_positions,
+ template_mask, template_pseudo_beta_mask, template_pseudo_beta, extra_msa, extra_has_deletion,
+ extra_deletion_value, extra_msa_mask,
+ residx_atom37_to_atom14, atom37_atom_exists, residue_index,
+ prev_pos, prev_msa_first_row, prev_pair, contact_info_mask):
+ """construct"""
+
+ contact_info_input = contact_info_mask.astype(mstype.int32)
+
+ preprocess_1d = self.preprocess_1d(target_feat)
+ preprocess_msa = self.preprocess_msa(msa_feat)
+ msa_activations = mnp.expand_dims(preprocess_1d, axis=0) + preprocess_msa
+
+ contact_feature = self.contact_one_hot(contact_info_input)
+ contact_act = self.preprocess_contact(contact_feature)
+
+ contact_act2 = contact_act * 1.0
+ contact_info_mask2 = contact_info_mask * 1.0
+
+ left_single = self.left_single(target_feat)
+ right_single = self.right_single(target_feat)
+
+ left_single = F.depend(left_single, contact_info_mask2)
+ left_single = F.depend(left_single, contact_act2)
+ pair_activations = P.ExpandDims()(left_single, 1) + P.ExpandDims()(right_single, 0)
+ mask_2d = P.ExpandDims()(seq_mask, 1) * P.ExpandDims()(seq_mask, 0)
+ if self.recycle_pos:
+ prev_pseudo_beta = pseudo_beta_fn(aatype, prev_pos, None)
+ dgram = dgram_from_positions(prev_pseudo_beta, self.num_bins, self.min_bin, self.max_bin, self._type)
+ pair_activations += self.prev_pos_linear(dgram)
+
+ if self.recycle_features:
+ prev_msa_first_row = self.prev_msa_first_row_norm(prev_msa_first_row)
+ msa_activations = mnp.concatenate(
+ (mnp.expand_dims(prev_msa_first_row + msa_activations[0, ...], 0), msa_activations[1:, ...]), 0)
+ pair_activations += self.prev_pair_norm(prev_pair)
+
+ if self.max_relative_feature:
+ offset = P.ExpandDims()(residue_index, 1) - P.ExpandDims()(residue_index, 0)
+ rel_pos = self.one_hot(mnp.clip(offset + self.max_relative_feature, 0, 2 * self.max_relative_feature))
+ pair_activations += self.pair_activations(rel_pos)
+
+ template_pair_representation = 0
+ if self.template_enabled:
+ template_pair_representation = self.template_embedding(pair_activations, template_aatype,
+ template_all_atom_masks, template_all_atom_positions,
+ template_mask, template_pseudo_beta_mask,
+ template_pseudo_beta, mask_2d)
+ pair_activations += template_pair_representation
+ msa_1hot = self.extra_msa_one_hot(extra_msa)
+ extra_msa_feat = mnp.concatenate((msa_1hot, extra_has_deletion[..., None], extra_deletion_value[..., None]),
+ axis=-1)
+ extra_msa_activations = self.extra_msa_activations(extra_msa_feat)
+ extra_msa_norm = P.ExpandDims()(P.MatMul(transpose_a=True)(extra_msa_mask, extra_msa_mask), -1)
+ for i in range(self.extra_msa_stack_num):
+ extra_msa_activations, pair_activations = \
+ self.extra_msa_stack[i](extra_msa_activations, pair_activations, extra_msa_mask, extra_msa_norm,
+ mask_2d)
+
+ if self.template_enabled and self.template_embed_torsion_angles:
+ num_templ, num_res = template_aatype.shape
+ aatype_one_hot = self.template_aatype_one_hot(template_aatype)
+ torsion_angles_sin_cos, alt_torsion_angles_sin_cos, torsion_angles_mask = atom37_to_torsion_angles(
+ template_aatype, template_all_atom_positions, template_all_atom_masks, self.chi_atom_indices,
+ self.chi_angles_mask, self.mirror_psi_mask, self.chi_pi_periodic, self.indices0, self.indices1)
+ template_features = mnp.concatenate([aatype_one_hot,
+ mnp.reshape(torsion_angles_sin_cos, [num_templ, num_res, 14]),
+ mnp.reshape(alt_torsion_angles_sin_cos, [num_templ, num_res, 14]),
+ torsion_angles_mask], axis=-1)
+ template_activations = self.template_single_embedding(template_features)
+ template_activations = self.relu(template_activations)
+ template_activations = self.template_projection(template_activations)
+ msa_activations = mnp.concatenate([msa_activations, template_activations], axis=0)
+ torsion_angle_mask = torsion_angles_mask[:, :, 2]
+ msa_mask = mnp.concatenate([msa_mask, torsion_angle_mask], axis=0)
+
+ msa_mask_norm = P.ExpandDims()(P.MatMul(transpose_a=True)(msa_mask, msa_mask), -1)
+
+ if self.is_training:
+ for i in range(self.msa_stack_num):
+ msa_activations, pair_activations = self.msa_stack[i](msa_activations, pair_activations, msa_mask,
+ msa_mask_norm, mask_2d)
+ else:
+ self.idx_evoformer_block = self.idx_evoformer_block * 0
+ while self.idx_evoformer_block < self.evoformer_num_block_eval:
+ msa_activations, pair_activations = self.msa_stack(msa_activations,
+ pair_activations,
+ msa_mask,
+ msa_mask_norm,
+ mask_2d,
+ contact_act2,
+ contact_info_mask2,
+ self.idx_evoformer_block)
+ self.idx_evoformer_block += 1
+ single_activations = self.single_activations(msa_activations[0])
+ num_sequences = msa_feat.shape[0]
+ msa = msa_activations[:num_sequences, :, :]
+ msa_first_row = msa_activations[0]
+
+ final_atom_positions, _, rp_structure_module, atom14_pred_positions, final_affines, \
+ angles_sin_cos_new, um_angles_sin_cos_new, sidechain_frames, sidechain_atom_pos, structure_traj = \
+ self.structure_module(single_activations,
+ pair_activations,
+ seq_mask,
+ aatype,
+ contact_act,
+ contact_info_mask,
+ residx_atom37_to_atom14,
+ atom37_atom_exists)
+ predicted_lddt_logits = self.module_lddt(rp_structure_module)
+ if self.is_training and self.train_backward:
+ predicted_lddt_logits = self.module_lddt(rp_structure_module)
+ dist_logits, bin_edges = self.module_distogram(pair_activations)
+ experimentally_logits = self.module_exp_resolved(single_activations)
+ masked_logits = self.module_mask(msa)
+ aligned_error_logits, aligned_error_breaks = self.aligned_error(pair_activations)
+ all_logits = dist_logits, bin_edges, experimentally_logits, masked_logits, aligned_error_logits, \
+ aligned_error_breaks, atom14_pred_positions, final_affines, angles_sin_cos_new, \
+ predicted_lddt_logits, structure_traj, sidechain_frames, sidechain_atom_pos, \
+ um_angles_sin_cos_new, final_atom_positions
+ return all_logits
+ final_atom_positions = P.Cast()(final_atom_positions, self._type)
+ prev_pos = final_atom_positions
+ prev_msa_first_row = msa_first_row
+ prev_pair = pair_activations
+ if self.is_training:
+ return prev_pos, prev_msa_first_row, prev_pair
+ all_val = prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits
+ return all_val
diff --git a/MindSPONGE/applications/research/FAAST/module/evoformer.py b/MindSPONGE/applications/research/FAAST/module/evoformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..22f888d8f5330fadf61f076eb2faa1ed20c08f90
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/module/evoformer.py
@@ -0,0 +1,211 @@
+# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Evoformer"""
+
+import mindspore.nn as nn
+from mindspore.ops import operations as P
+from mindsponge.cell import MSARowAttentionWithPairBias, Transition, OuterProductMean, \
+ TriangleAttention, TriangleMultiplication, \
+ MSAColumnGlobalAttention, MSAColumnAttention, MSARowAttentionWithPairBiasContact
+
+
+class EvoformerIterationContact(nn.Cell):
+    '''Evoformer iteration with an additional contact (restraint) bias.'''
+
+ def __init__(self, config, msa_act_dim, pair_act_dim, is_extra_msa, batch_size):
+ super(EvoformerIterationContact, self).__init__()
+ if is_extra_msa:
+ self.slice_cfg = config.slice.extra_msa_stack
+ else:
+ self.slice_cfg = config.slice.msa_stack
+ self.config = config
+
+ self.msa_act = MsaAct(self.config,
+ self.slice_cfg,
+ msa_act_dim,
+ pair_act_dim,
+ is_extra_msa,
+ batch_size,
+ contact=True)
+ self.pair_act = PairAct(self.config,
+ self.slice_cfg,
+ msa_act_dim,
+ pair_act_dim,
+ batch_size)
+
+ if config.is_training:
+ self.pair_act.recompute()
+
+ def construct(self, msa_act, pair_act, msa_mask, extra_msa_norm, pair_mask, contact_act2, contact_info_mask2,
+ index=None):
+ '''construct'''
+
+ msa_act = self.msa_act(msa_act, pair_act, msa_mask, contact_act2, contact_info_mask2, index)
+
+ pair_act = self.pair_act(msa_act, pair_act, msa_mask, extra_msa_norm, pair_mask, index)
+ return msa_act, pair_act
+
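+# Note: EvoformerIterationContact differs from Evoformer only in its MSA branch,
+# whose row attention (MSARowAttentionWithPairBiasContact) additionally receives
+# the restraint activations (contact_act2) and mask (contact_info_mask2).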
+
+class Evoformer(nn.Cell):
+ '''evoformer'''
+
+ def __init__(self, config, msa_act_dim, pair_act_dim, is_extra_msa, batch_size):
+ super(Evoformer, self).__init__()
+ if is_extra_msa:
+ self.slice_cfg = config.slice.extra_msa_stack
+ else:
+ self.slice_cfg = config.slice.msa_stack
+ self.config = config
+ self.msa_act = MsaAct(self.config,
+ self.slice_cfg,
+ msa_act_dim,
+ pair_act_dim,
+ is_extra_msa,
+ batch_size)
+ self.pair_act = PairAct(self.config,
+ self.slice_cfg,
+ msa_act_dim,
+ pair_act_dim,
+ batch_size)
+
+ if config.is_training:
+ self.pair_act.recompute()
+
+ def construct(self, msa_act, pair_act, msa_mask, extra_msa_norm, pair_mask, index=None):
+ '''construct'''
+ msa_act = self.msa_act(msa_act, pair_act, msa_mask, index)
+ pair_act = self.pair_act(msa_act, pair_act, msa_mask, extra_msa_norm, pair_mask, index)
+ return msa_act, pair_act
+
+
+class MsaAct(nn.Cell):
+ """MsaAct"""
+
+ def __init__(self, config, slice_cfg, msa_act_dim, pair_act_dim, is_extra_msa, batch_size, contact=False):
+ super(MsaAct, self).__init__()
+
+ self.slice_cfg = slice_cfg
+ self.config = config.evoformer
+ self.contact = contact
+ if self.contact:
+ self.msa_row_attention_with_pair_bias = MSARowAttentionWithPairBiasContact(
+ self.config.msa_row_attention_with_pair_bias.num_head,
+ msa_act_dim,
+ self.config.msa_row_attention_with_pair_bias.gating,
+ msa_act_dim,
+ pair_act_dim,
+ batch_size,
+ self.slice_cfg.msa_row_attention_with_pair_bias)
+ else:
+ self.msa_row_attention_with_pair_bias = MSARowAttentionWithPairBias(
+ self.config.msa_row_attention_with_pair_bias.num_head,
+ msa_act_dim,
+ self.config.msa_row_attention_with_pair_bias.gating,
+ msa_act_dim,
+ pair_act_dim,
+ batch_size,
+ self.slice_cfg.msa_row_attention_with_pair_bias)
+ self.msa_transition = Transition(self.config.msa_transition.num_intermediate_factor,
+ msa_act_dim,
+ batch_size,
+ self.slice_cfg.msa_transition)
+ if is_extra_msa:
+ self.attn_mod = MSAColumnGlobalAttention(self.config.msa_column_attention.num_head,
+ self.config.msa_column_attention.gating,
+ msa_act_dim,
+ batch_size,
+ self.slice_cfg.msa_column_global_attention)
+ else:
+ self.attn_mod = MSAColumnAttention(self.config.msa_column_attention.num_head,
+ msa_act_dim,
+ self.config.msa_column_attention.gating,
+ msa_act_dim,
+ batch_size,
+ self.slice_cfg.msa_column_attention)
+
+ if config.is_training:
+ self.msa_row_attention_with_pair_bias.recompute()
+ self.attn_mod.recompute()
+ self.msa_transition.recompute()
+
+ def construct(self, msa_act, pair_act, msa_mask, contact_act2=None, contact_info_mask2=None, index=None):
+ '''construct'''
+ if self.contact:
+ msa_act = P.Add()(msa_act, self.msa_row_attention_with_pair_bias(msa_act, msa_mask, pair_act, contact_act2,
+ contact_info_mask2, index))
+ else:
+ msa_act = P.Add()(msa_act, self.msa_row_attention_with_pair_bias(msa_act, msa_mask, pair_act, index))
+ msa_act = P.Add()(msa_act, self.attn_mod(msa_act, msa_mask, index))
+ msa_act = P.Add()(msa_act, self.msa_transition(msa_act, index))
+ return msa_act
+
+
+class PairAct(nn.Cell):
+ """PairAct"""
+
+ def __init__(self, config, slice_cfg, msa_act_dim, pair_act_dim, batch_size):
+ super(PairAct, self).__init__()
+ self.slice_cfg = slice_cfg
+ self.config = config.evoformer
+
+ self.outer_product_mean = OuterProductMean(self.config.outer_product_mean.num_outer_channel,
+ msa_act_dim,
+ pair_act_dim,
+ batch_size,
+ self.slice_cfg.outer_product_mean)
+
+ self.triangle_attention_starting_node = TriangleAttention(
+ self.config.triangle_attention_starting_node.orientation,
+ self.config.triangle_attention_starting_node.num_head,
+ pair_act_dim,
+ self.config.triangle_attention_starting_node.gating,
+ pair_act_dim,
+ batch_size,
+ self.slice_cfg.triangle_attention_starting_node)
+
+ self.triangle_attention_ending_node = TriangleAttention(self.config.triangle_attention_ending_node.orientation,
+ self.config.triangle_attention_ending_node.num_head,
+ pair_act_dim,
+ self.config.triangle_attention_ending_node.gating,
+ pair_act_dim,
+ batch_size,
+ self.slice_cfg.triangle_attention_ending_node)
+
+ self.pair_transition = Transition(self.config.pair_transition.num_intermediate_factor,
+ pair_act_dim,
+ batch_size,
+ self.slice_cfg.pair_transition)
+
+ self.triangle_multiplication_outgoing = TriangleMultiplication(
+ self.config.triangle_multiplication_outgoing.num_intermediate_channel,
+ self.config.triangle_multiplication_outgoing.equation,
+ layer_norm_dim=pair_act_dim,
+ batch_size=batch_size)
+
+ self.triangle_multiplication_incoming = TriangleMultiplication(
+ self.config.triangle_multiplication_incoming.num_intermediate_channel,
+ self.config.triangle_multiplication_incoming.equation,
+ layer_norm_dim=pair_act_dim,
+ batch_size=batch_size)
+
+ def construct(self, msa_act, pair_act, msa_mask, extra_msa_norm, pair_mask, index=None):
+ '''construct'''
+ pair_act = P.Add()(pair_act, self.outer_product_mean(msa_act, msa_mask, extra_msa_norm, index))
+ pair_act = P.Add()(pair_act, self.triangle_multiplication_outgoing(pair_act, pair_mask, index))
+ pair_act = P.Add()(pair_act, self.triangle_multiplication_incoming(pair_act, pair_mask, index))
+ pair_act = P.Add()(pair_act, self.triangle_attention_starting_node(pair_act, pair_mask, index))
+ pair_act = P.Add()(pair_act, self.triangle_attention_ending_node(pair_act, pair_mask, index))
+ pair_act = P.Add()(pair_act, self.pair_transition(pair_act, index))
+ return pair_act
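+
+
+# Usage sketch (illustrative only, not part of the original module): one evoformer block applies
+# the MSA updates first and then the pair updates, each as a residual add, roughly:
+#
+#     block = EvoformerIterationContact(cfg, msa_act_dim=256, pair_act_dim=128,
+#                                       is_extra_msa=False, batch_size=None)
+#     msa_act, pair_act = block(msa_act, pair_act, msa_mask, extra_msa_norm,
+#                               pair_mask, contact_act2, contact_info_mask2)
+#
+# Here `cfg` and the channel sizes are assumptions; the real values come from the FAAST model config.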
diff --git a/MindSPONGE/applications/research/FAAST/module/head.py b/MindSPONGE/applications/research/FAAST/module/head.py
new file mode 100644
index 0000000000000000000000000000000000000000..a773ec0fa971a2279967fd5ef469408da7698d67
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/module/head.py
@@ -0,0 +1,244 @@
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""structure module"""
+import mindspore.common.dtype as mstype
+import mindspore.nn as nn
+import mindspore.numpy as mnp
+from mindspore import Tensor
+from mindspore.ops import functional as F
+from mindsponge.cell.initializer import lecun_init
+
+
+class PredictedLDDTHead(nn.Cell):
+ """Head to predict the per-residue LDDT to be used as a confidence measure."""
+
+ def __init__(self, config, seq_channel):
+ super().__init__()
+ self.config = config
+ self.input_layer_norm = nn.LayerNorm([seq_channel,], epsilon=1e-5)
+ self.act_0 = nn.Dense(seq_channel, self.config.num_channels,
+ weight_init=lecun_init(seq_channel, initializer_name='relu')
+ ).to_float(mstype.float16)
+ self.act_1 = nn.Dense(self.config.num_channels, self.config.num_channels,
+ weight_init=lecun_init(self.config.num_channels, initializer_name='relu')
+ ).to_float(mstype.float16)
+ self.logits = nn.Dense(self.config.num_channels, self.config.num_bins, weight_init='zeros'
+ ).to_float(mstype.float16)
+ self.relu = nn.ReLU()
+
+ def construct(self, rp_structure_module):
+ """Builds ExperimentallyResolvedHead module."""
+ act = rp_structure_module
+ act = self.input_layer_norm(act.astype(mstype.float32))
+ act = self.act_0(act)
+ act = self.relu(act.astype(mstype.float32))
+ act = self.act_1(act)
+ act = self.relu(act.astype(mstype.float32))
+ logits = self.logits(act)
+ return logits
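+
+    # Note (illustrative, not part of the original head): the per-residue pLDDT is typically
+    # recovered from these logits by a softmax over bins followed by a weighted sum of bin
+    # centers, e.g. assuming num_bins = 50 and a 0-100 scale:
+    #
+    #     probs = nn.Softmax(-1)(logits.astype(mstype.float32)).asnumpy()
+    #     bin_centers = (np.arange(50) + 0.5) / 50 * 100.0
+    #     plddt = (probs * bin_centers).sum(-1)      # shape [N_res]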
+
+
+class DistogramHead(nn.Cell):
+ """Head to predict a distogram.
+
+ Jumper et al. (2021) Suppl. Sec. 1.9.8 "Distogram prediction"
+ """
+
+ def __init__(self, config, pair_dim):
+ super().__init__()
+ self.config = config
+ self.half_logits = nn.Dense(pair_dim, self.config.num_bins, weight_init='zeros')
+ self.first_break = self.config.first_break
+ self.last_break = self.config.last_break
+ self.num_bins = self.config.num_bins
+
+ def construct(self, pair):
+ """Builds DistogramHead module.
+
+ Arguments:
+ representations: Dictionary of representations, must contain:
+ * 'pair': pair representation, shape [N_res, N_res, c_z].
+
+ Returns:
+ Dictionary containing:
+ * logits: logits for distogram, shape [N_res, N_res, N_bins].
+ * bin_breaks: array containing bin breaks, shape [N_bins - 1,].
+ """
+ half_logits = self.half_logits(pair)
+
+ logits = half_logits + mnp.swapaxes(half_logits, -2, -3)
+ breaks = mnp.linspace(self.first_break, self.last_break, self.num_bins - 1)
+
+ return logits, breaks
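+
+    # Note (illustrative, not part of the original head): a contact probability under a distance
+    # cutoff can be read from these outputs by summing softmax probabilities over the bins below
+    # the cutoff, roughly:
+    #
+    #     probs = nn.Softmax(-1)(logits.astype(mstype.float32)).asnumpy()   # [N_res, N_res, N_bins]
+    #     breaks_np = breaks.asnumpy()                                      # [N_bins - 1]
+    #     in_contact = np.concatenate([[True], breaks_np < 8.0])            # first bin has no lower break
+    #     contact_prob = probs[..., in_contact].sum(-1)                     # [N_res, N_res]
+    #
+    # The 8.0 A cutoff is an assumption chosen for illustration.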
+
+
+class ExperimentallyResolvedHead(nn.Cell):
+ """Predicts if an atom is experimentally resolved in a high-res structure.
+
+ Only trained on high-resolution X-ray crystals & cryo-EM.
+ Jumper et al. (2021) Suppl. Sec. 1.9.10 '"Experimentally resolved" prediction'
+ """
+
+ def __init__(self, seq_channel):
+ super().__init__()
+ self.logits = nn.Dense(seq_channel, 37, weight_init='zeros')
+
+ def construct(self, single):
+ """Builds ExperimentallyResolvedHead module.
+
+ Arguments:
+ representations: Dictionary of representations, must contain:
+ * 'single': Single representation, shape [N_res, c_s].
+
+ Returns:
+ Dictionary containing:
+ * 'logits': logits of shape [N_res, 37],
+ log probability that an atom is resolved in atom37 representation,
+ can be converted to probability by applying sigmoid.
+ """
+ logits = self.logits(single)
+ return logits
+
+
+class MaskedMsaHead(nn.Cell):
+ """Head to predict MSA at the masked locations.
+
+ The MaskedMsaHead employs a BERT-style objective to reconstruct a masked
+ version of the full MSA, based on a linear projection of
+ the MSA representation.
+ Jumper et al. (2021) Suppl. Sec. 1.9.9 "Masked MSA prediction"
+ """
+
+ def __init__(self, config, msa_channel):
+ super().__init__()
+ self.config = config
+ self.logits = nn.Dense(msa_channel, self.config.num_output, weight_init='zeros')
+
+ def construct(self, msa):
+ """Builds MaskedMsaHead module.
+
+ Arguments:
+ representations: Dictionary of representations, must contain:
+ * 'msa': MSA representation, shape [N_seq, N_res, c_m].
+
+ Returns:
+ Dictionary containing:
+ * 'logits': logits of shape [N_seq, N_res, N_aatype] with
+ (unnormalized) log probabilies of predicted aatype at position.
+ """
+ # del batch
+ logits = self.logits(msa)
+ return logits
+
+
+class PredictedAlignedErrorHead(nn.Cell):
+ """Head to predict the distance errors in the backbone alignment frames.
+
+ Can be used to compute predicted TM-Score.
+ Jumper et al. (2021) Suppl. Sec. 1.9.7 "TM-score prediction"
+ """
+
+ def __init__(self, config, pair_dim):
+ super().__init__()
+ self.config = config
+ self.num_bins = self.config.num_bins
+ self.max_error_bin = self.config.max_error_bin
+ self.logits = nn.Dense(pair_dim, self.num_bins, weight_init='zeros')
+
+ def construct(self, pair):
+ """Builds PredictedAlignedErrorHead module.
+
+ Arguments:
+ * 'pair': pair representation, shape [N_res, N_res, c_z].
+
+ Returns:
+ * logits: logits for aligned error, shape [N_res, N_res, N_bins].
+ * breaks: array containing bin breaks, shape [N_bins - 1].
+ """
+ logits = self.logits(pair)
+ breaks = mnp.linspace(0, self.max_error_bin, self.num_bins - 1)
+ return logits, breaks
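+
+    # Note (illustrative, not part of the original head): the expected aligned error per residue
+    # pair can be estimated by a softmax over bins and a weighted sum of bin centers, e.g.
+    #
+    #     probs = nn.Softmax(-1)(logits.astype(mstype.float32)).asnumpy()
+    #     breaks_np = breaks.asnumpy()
+    #     step = breaks_np[1] - breaks_np[0]
+    #     bin_centers = np.append(breaks_np + step / 2, breaks_np[-1] + 1.5 * step)
+    #     predicted_aligned_error = (probs * bin_centers).sum(-1)           # [N_res, N_res]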
+
+
+class EstogramHead(nn.Cell):
+ """Head to predict estogram."""
+
+ def __init__(self, first_break, last_break, num_bins):
+ super().__init__()
+ self.first_break = first_break
+ self.last_break = last_break
+ self.num_bins = num_bins
+
+ self.breaks = mnp.linspace(self.first_break, self.last_break, self.num_bins)
+ self.width = self.breaks[1] - self.breaks[0]
+
+ self.centers = self.breaks + 0.5 * self.width
+
+ self.softmax = nn.Softmax(-1)
+ self.zero = Tensor([0.])
+
+ def compute_estogram(self, distogram_logits, decoy_distance_mat):
+ """compute estogram matrix.
+ Arguments:
+ distogram_logits: [N_res, N_res, N_bins].
+ decoy_distance_mat: [N_res, N_res]
+ Returns:
+ estogram: shape [N_res, N_res, N_bins].
+ esto_centers: shape [N_res, N_res, N_bins].
+ """
+ square_centers = mnp.reshape(self.centers, (1, 1, -1))
+ estogram = self.softmax(distogram_logits)
+ esto_centers = square_centers - mnp.expand_dims(decoy_distance_mat, -1)
+ return estogram, esto_centers
+
+ def construct(self, distogram_logits, pseudo_beta, pseudo_beta_mask, cutoff=15.):
+ """construct"""
+ positions = pseudo_beta
+ pad_mask = mnp.expand_dims(pseudo_beta_mask, 1)
+ pad_mask_2d = pad_mask * mnp.transpose(pad_mask, (1, 0))
+ pad_mask_2d *= (1. - mnp.eye(pad_mask_2d.shape[1]))
+
+ dist_xyz = mnp.square(mnp.expand_dims(positions, axis=1) - mnp.expand_dims(positions, axis=0))
+ dmat_decoy = mnp.sqrt(1e-10 + mnp.sum(dist_xyz.astype(mstype.float32), -1))
+
+ estogram, esto_centers = self.compute_estogram(distogram_logits, dmat_decoy)
+ pair_errors = mnp.sum(estogram * esto_centers, -1)
+
+ p1 = self._integrate(distogram_logits, mnp.abs(esto_centers) < 0.5).astype(mnp.float32)
+ p2 = self._integrate(distogram_logits, mnp.abs(esto_centers) < 1.0).astype(mnp.float32)
+ p3 = self._integrate(distogram_logits, mnp.abs(esto_centers) < 2.0).astype(mnp.float32)
+ p4 = self._integrate(distogram_logits, mnp.abs(esto_centers) < 4.0).astype(mnp.float32)
+
+ p0 = self._integrate(distogram_logits, self.centers < cutoff).astype(mnp.float32)
+ pred_mask2d = p0 * pad_mask_2d
+
+ norm = mnp.sum(pred_mask2d, -1) + 1e-6
+ p1 = mnp.sum(p1 * pred_mask2d, -1)
+ p2 = mnp.sum(p2 * pred_mask2d, -1)
+ p3 = mnp.sum(p3 * pred_mask2d, -1)
+ p4 = mnp.sum(p4 * pred_mask2d, -1)
+
+ plddt = 0.25 * (p1 + p2 + p3 + p4) / norm
+
+ return plddt, pred_mask2d, pair_errors
+
+ def _integrate(self, distogram_logits, integrate_masks):
+ """compute estogram matrix.
+ Arguments:
+ distogram_logits: [N_res, N_res, N_bins].
+ integrate_masks: [N_res, N_res, N_bins]
+ Returns:
+ v: shape [N_res, N_res].
+ """
+ probs = self.softmax(distogram_logits)
+ integrate_masks = F.cast(integrate_masks, mnp.float32)
+ v = mnp.sum(probs * integrate_masks, -1)
+ return v
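+
+    # Usage sketch (illustrative only): given distogram logits and the decoy pseudo-beta
+    # coordinates, the head returns a per-residue pLDDT estimate, e.g.
+    #
+    #     head = EstogramHead(first_break=2.3125, last_break=21.6875, num_bins=64)
+    #     plddt, pred_mask2d, pair_errors = head(distogram_logits, pseudo_beta, pseudo_beta_mask)
+    #
+    # The bin settings above are assumptions; the real values come from the distogram head config.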
diff --git a/MindSPONGE/applications/research/FAAST/module/structure.py b/MindSPONGE/applications/research/FAAST/module/structure.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d6e66c20d4bb3ae97e3c8e6a28066fe8c23eea8
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/module/structure.py
@@ -0,0 +1,490 @@
+# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""structure module"""
+import numpy as np
+import mindspore.common.dtype as mstype
+import mindspore.nn as nn
+import mindspore.numpy as mnp
+import mindspore.ops as ops
+from mindspore import Tensor, Parameter
+from mindspore.ops import functional as F
+import mindsponge.common.residue_constants as residue_constants
+from mindsponge.cell.initializer import lecun_init
+from mindsponge.common.utils import torsion_angles_to_frames, frames_and_literature_positions_to_atom14_pos, \
+ atom14_to_atom37, pseudo_beta_fn
+from mindsponge.common.geometry import initial_affine, quaternion_to_tensor, pre_compose, vecs_scale, \
+ vecs_to_tensor, vecs_expand_dims, rots_expand_dims, apply_to_point, invert_point
+
+
+class InvariantPointContactAttention(nn.Cell):
+ r"""
+ Invariant Point attention module.
+    This module updates the sequence representation, which is the first input (inputs_1d),
+    by adding spatial information to it.
+
+    The attention consists of three parts: q, k, v obtained from the sequence representation;
+    q', k', v' obtained from the interaction between the sequence representation and the rigid body group;
+    and b, the bias, obtained from the pair representation (the second input, inputs_2d).
+
+ .. math::
+ a_{ij} = Softmax(w_l(c_1{q_i}^Tk_j+b{ij}-c_2\sum {\left \| T_i\circ q'_i-T_j\circ k'_j \right \| ^{2 } })
+
+ where i and j represent the ith and jth amino acids in the sequence, respectively,
+ and T is the rotation and translation in the input.
+
+    Reference: Jumper et al. (2021) Suppl. Alg. 22 "InvariantPointAttention",
+    extended here with an additional contact-based attention bias.
+
+ Args:
+ num_head (int): The number of the heads.
+ num_scalar_qk (int): The number of the scalar query/key.
+ num_scalar_v (int): The number of the scalar value.
+ num_point_v (int): The number of the point value.
+ num_point_qk (int): The number of the point query/key.
+ num_channel (int): The number of the channel.
+ pair_dim (int): The last dimension length of pair.
+
+ Inputs:
+ - **inputs_1d** (Tensor) - The first row of msa representation which is the output of evoformer module,
+ also called the sequence representation, shape :math:`[N_{res}, num\_channel]`.
+ - **inputs_2d** (Tensor) - The pair representation which is the output of evoformer module,
+ shape :math:`[N_{res}, N_{res}, pair\_dim]`.
+ - **mask** (Tensor) - A mask that determines which elements of inputs_1d are involved in the
+ attention calculation, shape :math:`[N_{res}, 1]`
+ - **rotation** (tuple) - A rotation term in a rigid body group T(r,t),
+ A tuple of length 9, The shape of each elements in the tuple is :math:`[N_{res}]`.
+ - **translation** (tuple) - A translation term in a rigid body group T(r,t),
+ A tuple of length 3, The shape of each elements in the tuple is :math:`[N_{res}]`.
+
+ Outputs:
+ Tensor, the update of inputs_1d, shape :math:`[N_{res}, num\_channel]`.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU``
+
+ Examples:
+ >>> import numpy as np
+        >>> from module.structure import InvariantPointContactAttention
+ >>> from mindspore import dtype as mstype
+ >>> from mindspore import Tensor
+ >>> import mindspore.context as context
+ >>> context.set_context(mode=context.GRAPH_MODE)
+ >>> model = InvariantPointContactAttention(num_head=12, num_scalar_qk=16, num_scalar_v=16,
+ ... num_point_v=8, num_point_qk=4,
+ ... num_channel=384, pair_dim=128)
+ >>> inputs_1d = Tensor(np.ones((256, 384)), mstype.float32)
+ >>> inputs_2d = Tensor(np.ones((256, 256, 128)), mstype.float32)
+ >>> mask = Tensor(np.ones((256, 1)), mstype.float32)
+        >>> rotation = tuple([Tensor(np.ones(256), mstype.float16) for _ in range(9)])
+        >>> translation = tuple([Tensor(np.ones(256), mstype.float16) for _ in range(3)])
+        >>> contact_act = Tensor(np.ones((256, 256, 32)), mstype.float32)
+        >>> contact_info_mask = Tensor(np.ones((256, 256)), mstype.float32)
+        >>> attn_out = model(inputs_1d, inputs_2d, mask, rotation, translation, contact_act, contact_info_mask)
+        >>> print(attn_out.shape)
+        (256, 384)
+ """
+
+ def __init__(self, num_head, num_scalar_qk, num_scalar_v, num_point_v, num_point_qk, num_channel, pair_dim):
+ super(InvariantPointContactAttention, self).__init__()
+
+ self._dist_epsilon = 1e-8
+ self.num_head = num_head
+ self.num_scalar_qk = num_scalar_qk
+ self.num_scalar_v = num_scalar_v
+ self.num_point_v = num_point_v
+ self.num_point_qk = num_point_qk
+ self.num_channel = num_channel
+ self.projection_num = self.num_head * self.num_scalar_v + self.num_head * self.num_point_v * 4 + \
+ self.num_head * pair_dim
+ self.q_scalar = nn.Dense(self.num_channel, self.num_head * self.num_scalar_qk,
+ weight_init=lecun_init(self.num_channel))
+ self.kv_scalar = nn.Dense(self.num_channel, self.num_head * (self.num_scalar_qk + self.num_scalar_v),
+ weight_init=lecun_init(self.num_channel))
+ self.q_point_local = nn.Dense(self.num_channel, self.num_head * 3 * self.num_point_qk,
+ weight_init=lecun_init(self.num_channel)
+ )
+ self.kv_point_local = nn.Dense(self.num_channel, self.num_head * 3 * (self.num_point_qk + self.num_point_v),
+ weight_init=lecun_init(self.num_channel))
+ self.contact_layer = nn.Dense(32, self.num_head)
+ self.soft_max = nn.Softmax()
+ self.soft_plus = ops.Softplus()
+ self.trainable_point_weights = Parameter(Tensor(np.ones((12,)), mstype.float32), name="trainable_point_weights")
+ self.attention_2d = nn.Dense(pair_dim, self.num_head, weight_init=lecun_init(pair_dim))
+ self.output_projection = nn.Dense(self.projection_num, self.num_channel, weight_init='zeros'
+ )
+ self.scalar_weights = Tensor(np.sqrt(1.0 / (3 * 16)).astype(np.float32))
+ self.point_weights = Tensor(np.sqrt(1.0 / (3 * 18)).astype(np.float32))
+ self.attention_2d_weights = Tensor(np.sqrt(1.0 / 3).astype(np.float32))
+
+ def construct(self, inputs_1d, inputs_2d, mask, rotation, translation, contact_act=None, contact_info_mask=None):
+ '''construct'''
+ num_residues, _ = inputs_1d.shape
+
+ # Improve readability by removing a large number of 'self's.
+ num_head = self.num_head
+ num_scalar_qk = self.num_scalar_qk
+ num_point_qk = self.num_point_qk
+ num_scalar_v = self.num_scalar_v
+ num_point_v = self.num_point_v
+
+        # Construct scalar queries, shape [num_residues, num_head, num_scalar_qk].
+ q_scalar = self.q_scalar(inputs_1d)
+ q_scalar = mnp.reshape(q_scalar, [num_residues, num_head, num_scalar_qk])
+
+        # Construct scalar keys/values, shape [num_residues, num_head, num_scalar_v + num_scalar_qk].
+ kv_scalar = self.kv_scalar(inputs_1d)
+ kv_scalar = mnp.reshape(kv_scalar, [num_residues, num_head, num_scalar_v + num_scalar_qk])
+ k_scalar, v_scalar = mnp.split(kv_scalar, [num_scalar_qk], axis=-1)
+
+        # Construct query points, shape [num_residues, num_head, num_point_qk] per coordinate.
+        # First construct query points in the local frame.
+ q_point_local = self.q_point_local(inputs_1d)
+
+ q_point_local = mnp.split(q_point_local, 3, axis=-1)
+ q_point_local = (ops.Squeeze()(q_point_local[0]), ops.Squeeze()(q_point_local[1]),
+ ops.Squeeze()(q_point_local[2]))
+ # Project query points into global frame.
+ q_point_global = apply_to_point(rotation, translation, q_point_local, 1)
+
+ # Reshape query point for later use.
+ q_point0 = mnp.reshape(q_point_global[0], (num_residues, num_head, num_point_qk))
+ q_point1 = mnp.reshape(q_point_global[1], (num_residues, num_head, num_point_qk))
+ q_point2 = mnp.reshape(q_point_global[2], (num_residues, num_head, num_point_qk))
+
+ # Construct key and value points.
+ # Key points have shape [num_residues, num_head, num_point_qk]
+ # Value points have shape [num_residues, num_head, num_point_v]
+
+ # Construct key and value points in local frame.
+ kv_point_local = self.kv_point_local(inputs_1d)
+
+ kv_point_local = mnp.split(kv_point_local, 3, axis=-1)
+ kv_point_local = (ops.Squeeze()(kv_point_local[0]), ops.Squeeze()(kv_point_local[1]),
+ ops.Squeeze()(kv_point_local[2]))
+ # Project key and value points into global frame.
+ kv_point_global = apply_to_point(rotation, translation, kv_point_local, 1)
+
+ kv_point_global0 = mnp.reshape(kv_point_global[0], (num_residues, num_head, (num_point_qk + num_point_v)))
+ kv_point_global1 = mnp.reshape(kv_point_global[1], (num_residues, num_head, (num_point_qk + num_point_v)))
+ kv_point_global2 = mnp.reshape(kv_point_global[2], (num_residues, num_head, (num_point_qk + num_point_v)))
+
+ # Split key and value points.
+ k_point0, v_point0 = mnp.split(kv_point_global0, [num_point_qk], axis=-1)
+ k_point1, v_point1 = mnp.split(kv_point_global1, [num_point_qk], axis=-1)
+ k_point2, v_point2 = mnp.split(kv_point_global2, [num_point_qk], axis=-1)
+
+ trainable_point_weights = self.soft_plus(self.trainable_point_weights)
+ point_weights = self.point_weights * ops.expand_dims(trainable_point_weights, axis=1)
+
+ v_point = [mnp.swapaxes(v_point0, -2, -3), mnp.swapaxes(v_point1, -2, -3), mnp.swapaxes(v_point2, -2, -3)]
+ q_point = [mnp.swapaxes(q_point0, -2, -3), mnp.swapaxes(q_point1, -2, -3), mnp.swapaxes(q_point2, -2, -3)]
+ k_point = [mnp.swapaxes(k_point0, -2, -3), mnp.swapaxes(k_point1, -2, -3), mnp.swapaxes(k_point2, -2, -3)]
+
+ dist2 = ops.Square()(ops.expand_dims(q_point[0], 2) - ops.expand_dims(k_point[0], 1)) + \
+ ops.Square()(ops.expand_dims(q_point[1], 2) - ops.expand_dims(k_point[1], 1)) + \
+ ops.Square()(ops.expand_dims(q_point[2], 2) - ops.expand_dims(k_point[2], 1))
+
+ attn_qk_point = -0.5 * mnp.sum(ops.expand_dims(ops.expand_dims(point_weights, 1), 1) * dist2, axis=-1)
+
+ v = mnp.swapaxes(v_scalar, -2, -3)
+ q = mnp.swapaxes(self.scalar_weights * q_scalar, -2, -3)
+ k = mnp.swapaxes(k_scalar, -2, -3)
+ attn_qk_scalar = ops.matmul(q, mnp.swapaxes(k, -2, -1))
+ attn_logits = attn_qk_scalar + attn_qk_point
+
+ attention_2d = self.attention_2d(inputs_2d)
+ attention_2d = mnp.transpose(attention_2d, [2, 0, 1])
+ attention_2d = self.attention_2d_weights * attention_2d
+
+ attn_logits += attention_2d
+
+        # Add the contact-based bias (masked by contact_info_mask) to the attention logits.
+ contact_act = self.contact_layer(contact_act)
+ contact_act = ops.Transpose()(contact_act, (2, 0, 1))
+ contact_act = contact_act * contact_info_mask[None, :, :]
+
+ attn_logits += contact_act
+
+ mask_2d = mask * mnp.swapaxes(mask, -1, -2)
+ attn_logits -= 50 * (1. - mask_2d)
+
+ attn = self.soft_max(attn_logits)
+
+ result_scalar = ops.matmul(attn, v)
+
+ result_point_global = [mnp.swapaxes(mnp.sum(attn[:, :, :, None] * v_point[0][:, None, :, :], axis=-2), -2, -3),
+ mnp.swapaxes(mnp.sum(attn[:, :, :, None] * v_point[1][:, None, :, :], axis=-2), -2, -3),
+ mnp.swapaxes(mnp.sum(attn[:, :, :, None] * v_point[2][:, None, :, :], axis=-2), -2, -3)
+ ]
+
+ result_point_global = [mnp.reshape(result_point_global[0], [num_residues, num_head * num_point_v]),
+ mnp.reshape(result_point_global[1], [num_residues, num_head * num_point_v]),
+ mnp.reshape(result_point_global[2], [num_residues, num_head * num_point_v])]
+ result_scalar = mnp.swapaxes(result_scalar, -2, -3)
+
+ result_scalar = mnp.reshape(result_scalar, [num_residues, num_head * num_scalar_v])
+
+ result_point_local = invert_point(result_point_global, rotation, translation, 1)
+
+ output_feature1 = result_scalar
+ output_feature20 = result_point_local[0]
+ output_feature21 = result_point_local[1]
+ output_feature22 = result_point_local[2]
+
+ output_feature3 = mnp.sqrt(self._dist_epsilon +
+ ops.Square()(result_point_local[0]) +
+ ops.Square()(result_point_local[1]) +
+ ops.Square()(result_point_local[2]))
+
+ result_attention_over_2d = ops.matmul(mnp.swapaxes(attn, 0, 1), inputs_2d)
+ num_out = num_head * result_attention_over_2d.shape[-1]
+ output_feature4 = mnp.reshape(result_attention_over_2d, [num_residues, num_out])
+
+ final_act = mnp.concatenate([output_feature1, output_feature20, output_feature21,
+ output_feature22, output_feature3, output_feature4], axis=-1)
+ final_result = self.output_projection(final_act)
+ return final_result
+
+
+class MultiRigidSidechain(nn.Cell):
+ """Class to make side chain atoms."""
+
+ def __init__(self, config, single_repr_dim):
+ super().__init__()
+ self.config = config
+ self.input_projection = nn.Dense(single_repr_dim, self.config.num_channel,
+ weight_init=lecun_init(single_repr_dim))
+ self.input_projection_1 = nn.Dense(single_repr_dim, self.config.num_channel,
+ weight_init=lecun_init(single_repr_dim))
+ self.relu = nn.ReLU()
+ self.resblock1 = nn.Dense(self.config.num_channel, self.config.num_channel,
+ weight_init=lecun_init(self.config.num_channel,
+ initializer_name='relu'))
+ self.resblock2 = nn.Dense(self.config.num_channel, self.config.num_channel, weight_init='zeros')
+ self.resblock1_1 = nn.Dense(self.config.num_channel, self.config.num_channel,
+ weight_init=lecun_init(self.config.num_channel, initializer_name='relu'))
+ self.resblock2_1 = nn.Dense(self.config.num_channel, self.config.num_channel, weight_init='zeros')
+ self.unnormalized_angles = nn.Dense(self.config.num_channel, 14,
+ weight_init=lecun_init(self.config.num_channel))
+ self.restype_atom14_to_rigid_group = Tensor(residue_constants.restype_atom14_to_rigid_group)
+ self.restype_atom14_rigid_group_positions = Tensor(residue_constants.restype_atom14_rigid_group_positions)
+ self.restype_atom14_mask = Tensor(residue_constants.restype_atom14_mask)
+ self.restype_rigid_group_default_frame = Tensor(residue_constants.restype_rigid_group_default_frame)
+ self.l2_normalize = ops.L2Normalize(axis=-1, epsilon=1e-12)
+
+ def construct(self, rotation, translation, act, initial_act, aatype):
+ """Predict side chains using rotation and translation representations.
+
+ Args:
+            rotation: The rotation matrices.
+            translation: The translation vectors.
+            act: updated single (1D) activations from the structure module.
+            initial_act: initial single activations (input of the structure module).
+            aatype: amino acid type representations.
+
+ Returns:
+ angles, positions and new frames
+ """
+
+ act1 = self.input_projection(self.relu(act))
+ init_act1 = self.input_projection_1(self.relu(initial_act))
+ # Sum the activation list (equivalent to concat then Linear).
+ act = act1 + init_act1
+
+ # Mapping with some residual blocks.
+ # resblock1
+ old_act = act
+ act = self.resblock1(self.relu(act))
+ act = self.resblock2(self.relu(act))
+ act += old_act
+ # resblock2
+ old_act = act
+ act = self.resblock1_1(self.relu(act))
+ act = self.resblock2_1(self.relu(act))
+ act += old_act
+
+ # Map activations to torsion angles. Shape: (num_res, 14).
+ num_res = act.shape[0]
+ unnormalized_angles = self.unnormalized_angles(self.relu(act))
+
+ unnormalized_angles = mnp.reshape(unnormalized_angles, [num_res, 7, 2])
+ angles = self.l2_normalize(unnormalized_angles)
+
+ backb_to_global = ((rotation[0], rotation[1], rotation[2],
+ rotation[3], rotation[4], rotation[5],
+ rotation[6], rotation[7], rotation[8]),
+ (translation[0], translation[1], translation[2]))
+
+ all_frames_to_global = torsion_angles_to_frames(aatype, backb_to_global, angles,
+ self.restype_rigid_group_default_frame)
+
+ pred_positions = frames_and_literature_positions_to_atom14_pos(aatype, all_frames_to_global,
+ self.restype_atom14_to_rigid_group,
+ self.restype_atom14_rigid_group_positions,
+ self.restype_atom14_mask)
+
+ atom_pos = pred_positions
+ frames = all_frames_to_global
+ res = (angles, unnormalized_angles, atom_pos, frames)
+ return res
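+
+    # Note (illustrative, not part of the original class): l2_normalize projects each
+    # unnormalized (sin, cos) pair onto the unit circle, so every torsion angle satisfies
+    # sin(chi)^2 + cos(chi)^2 = 1; e.g. an unnormalized pair (3.0, 4.0) becomes (0.6, 0.8)
+    # before being converted to rigid-group frames.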
+
+
+class FoldIteration(nn.Cell):
+ """A single iteration of the main structure module loop."""
+
+ def __init__(self, config, pair_dim, single_repr_dim):
+ super().__init__()
+ self.config = config
+ self.drop_out = nn.Dropout(p=0.1)
+ self.attention_layer_norm = nn.LayerNorm([self.config.num_channel,], epsilon=1e-5)
+ self.transition_layer_norm = nn.LayerNorm([self.config.num_channel,], epsilon=1e-5)
+ self.transition = nn.Dense(self.config.num_channel, config.num_channel,
+ weight_init=lecun_init(self.config.num_channel, initializer_name='relu'))
+ self.transition_1 = nn.Dense(self.config.num_channel, self.config.num_channel,
+ weight_init=lecun_init(self.config.num_channel, initializer_name='relu'))
+ self.transition_2 = nn.Dense(self.config.num_channel, self.config.num_channel, weight_init='zeros')
+ self.relu = nn.ReLU()
+ self.affine_update = nn.Dense(self.config.num_channel, 6, weight_init='zeros')
+ self.attention_module = InvariantPointContactAttention(self.config.num_head,
+ self.config.num_scalar_qk,
+ self.config.num_scalar_v,
+ self.config.num_point_v,
+ self.config.num_point_qk,
+ self.config.num_channel,
+ pair_dim)
+ self.mu_side_chain = MultiRigidSidechain(self.config.sidechain, single_repr_dim)
+ self.print = ops.Print()
+
+ def construct(self, act, static_feat_2d, sequence_mask, quaternion, rotation, \
+ translation, initial_act, aatype, contact_act2, contact_info_mask2):
+ """construct"""
+ attn = self.attention_module(act, static_feat_2d, sequence_mask, \
+ rotation, translation, contact_act2, contact_info_mask2)
+ act += attn
+ act = self.drop_out(act)
+ act = self.attention_layer_norm(act)
+ # Transition
+ input_act = act
+ act = self.transition(act)
+ act = self.relu(act)
+ act = self.transition_1(act)
+ act = self.relu(act)
+ act = self.transition_2(act)
+
+ act += input_act
+ act = self.drop_out(act)
+ act = self.transition_layer_norm(act)
+
+ # This block corresponds to
+ # Jumper et al. (2021) Alg. 23 "Backbone update"
+ # Affine update
+ affine_update = self.affine_update(act)
+ quaternion, rotation, translation = pre_compose(quaternion, rotation, translation, affine_update)
+ translation1 = vecs_scale(translation, 10.0)
+ rotation1 = rotation
+ angles_sin_cos, unnormalized_angles_sin_cos, atom_pos, frames = \
+ self.mu_side_chain(rotation1, translation1, act, initial_act, aatype)
+
+ affine_output = quaternion_to_tensor(quaternion, translation)
+ quaternion = F.stop_gradient(quaternion)
+ rotation = F.stop_gradient(rotation)
+ res = (act, quaternion, translation, rotation, affine_output, angles_sin_cos, unnormalized_angles_sin_cos, \
+ atom_pos, frames)
+ return res
+
+
+class StructureModule(nn.Cell):
+ """StructureModule as a network head."""
+
+ def __init__(self, config, single_repr_dim, pair_dim):
+ super(StructureModule, self).__init__()
+ self.config = config.structure_module
+ self.seq_length = config.seq_length
+ self.fold_iteration = FoldIteration(self.config, pair_dim, single_repr_dim)
+ self.single_layer_norm = nn.LayerNorm([single_repr_dim,], epsilon=1e-5)
+ self.initial_projection = nn.Dense(single_repr_dim, self.config.num_channel,
+ weight_init=lecun_init(single_repr_dim))
+ self.pair_layer_norm = nn.LayerNorm([pair_dim,], epsilon=1e-5)
+ self.num_layer = self.config.num_layer
+ self.indice0 = Tensor(
+ np.arange(self.seq_length).reshape((-1, 1, 1)).repeat(37, axis=1).astype("int32"))
+ self.traj_w = Tensor(np.array([1.] * 4 + [self.config.position_scale] * 3), mstype.float32)
+ self.use_sumcons = True
+
+ def construct(self, single, pair, seq_mask, aatype, contact_act2, contact_info_mask2, residx_atom37_to_atom14=None,
+ atom37_atom_exists=None):
+ """construct"""
+ sequence_mask = seq_mask[:, None]
+ act = self.single_layer_norm(single)
+ initial_act = act
+ act = self.initial_projection(act)
+ quaternion, rotation, translation = initial_affine(self.seq_length)
+ act_2d = self.pair_layer_norm(pair)
+ # folder iteration
+ atom_pos, affine_output_new, angles_sin_cos_new, um_angles_sin_cos_new, sidechain_frames, act_iter = \
+ self.iteration_operation(act, act_2d, sequence_mask, quaternion, rotation, translation, initial_act, aatype,
+ contact_act2, contact_info_mask2)
+ atom14_pred_positions = vecs_to_tensor(atom_pos)[-1]
+ sidechain_atom_pos = atom_pos
+
+ atom37_pred_positions = atom14_to_atom37(atom14_pred_positions,
+ residx_atom37_to_atom14,
+ atom37_atom_exists,
+ self.indice0)
+
+ structure_traj = affine_output_new * self.traj_w
+ final_affines = affine_output_new[-1]
+ final_atom_positions = atom37_pred_positions
+ final_atom_mask = atom37_atom_exists
+ rp_structure_module = act_iter
+ if self.use_sumcons:
+ pseudo_beta_pred = pseudo_beta_fn(aatype, atom37_pred_positions, None)
+ coord_diffs = pseudo_beta_pred[None] - pseudo_beta_pred[:, None]
+ distance = ops.Sqrt()(ops.ReduceSum()(ops.Square()(coord_diffs), -1) + 1e-8)
+ scale = (8.10 / distance - 1) * contact_info_mask2 * (distance > 8.10)
+ contact_translation_2 = scale[:, :, None] * coord_diffs / 2
+ contact_translation = ops.ReduceSum(keep_dims=True)(contact_translation_2, 1)
+ atom14_pred_positions = atom14_pred_positions - contact_translation
+ final_atom_positions = final_atom_positions - contact_translation
+ res = (final_atom_positions, final_atom_mask, rp_structure_module, atom14_pred_positions, final_affines, \
+ angles_sin_cos_new, um_angles_sin_cos_new, sidechain_frames, sidechain_atom_pos, structure_traj)
+ return res
+
+ def iteration_operation(self, act, act_2d, sequence_mask, quaternion, rotation, translation, initial_act,
+ aatype, contact_act2, contact_info_mask2):
+ """iteration_operation"""
+ affine_init = ()
+ angles_sin_cos_init = ()
+ um_angles_sin_cos_init = ()
+ atom_pos_batch = ()
+ frames_batch = ()
+
+ for _ in range(self.num_layer):
+ act, quaternion, translation, rotation, affine_output, angles_sin_cos, unnormalized_angles_sin_cos, \
+ atom_pos, frames = \
+ self.fold_iteration(act, act_2d, sequence_mask, quaternion, rotation, translation, initial_act, aatype,
+ contact_act2, contact_info_mask2)
+
+ affine_init = affine_init + (affine_output[None, ...],)
+ angles_sin_cos_init = angles_sin_cos_init + (angles_sin_cos[None, ...],)
+ um_angles_sin_cos_init = um_angles_sin_cos_init + (unnormalized_angles_sin_cos[None, ...],)
+ atom_pos_batch += (mnp.concatenate(vecs_expand_dims(atom_pos, 0), axis=0)[:, None, ...],)
+ frames_batch += (mnp.concatenate(rots_expand_dims(frames[0], 0) +
+ vecs_expand_dims(frames[1], 0), axis=0)[:, None, ...],)
+ affine_output_new = mnp.concatenate(affine_init, axis=0)
+ angles_sin_cos_new = mnp.concatenate(angles_sin_cos_init, axis=0)
+ um_angles_sin_cos_new = mnp.concatenate(um_angles_sin_cos_init, axis=0)
+ frames_new = mnp.concatenate(frames_batch, axis=1)
+ atom_pos_new = mnp.concatenate(atom_pos_batch, axis=1)
+ res = (atom_pos_new, affine_output_new, angles_sin_cos_new, um_angles_sin_cos_new, frames_new, act)
+ return res
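+
+
+# Note on the contact restraint correction in StructureModule.construct (illustrative): for a
+# restrained pair whose predicted pseudo-beta distance d exceeds 8.10 A, the scale (8.10 / d - 1)
+# is negative, so each residue is pulled along the pair vector toward the 8.10 A target, e.g.
+#
+#     d = 10.0  ->  scale = 8.10 / 10.0 - 1 = -0.19, a 19% contraction of the pair vector,
+#                   split between the two residues by the factor 1/2.
+#
+# Pairs already within 8.10 A, or with contact_info_mask2 == 0, are left unchanged.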
diff --git a/MindSPONGE/applications/research/FAAST/module/template_embedding.py b/MindSPONGE/applications/research/FAAST/module/template_embedding.py
new file mode 100644
index 0000000000000000000000000000000000000000..c01f29a8718e0d08c2737a75b86c97643d6f97c7
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/module/template_embedding.py
@@ -0,0 +1,226 @@
+# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+'''TEMPLATE'''
+import mindspore.common.dtype as mstype
+import mindspore.nn as nn
+from mindspore.ops import functional as F
+from mindspore.ops import operations as P
+from mindsponge.cell.initializer import lecun_init
+from mindsponge.common.utils import dgram_from_positions, _memory_reduce
+from mindsponge.common.geometry import make_transform_from_reference, quat_affine, invert_point
+from mindsponge.common.residue_constants import atom_order
+from mindsponge.cell import Attention, TriangleAttention, Transition, TriangleMultiplication
+
+
+class TemplatePairStack(nn.Cell):
+ '''template pair stack'''
+
+ def __init__(self, config):
+ super(TemplatePairStack, self).__init__()
+ self.config = config.template.template_pair_stack
+ self.num_block = self.config.num_block
+ batch_size = 0
+ self.slice = config.slice.template_pair_stack
+ start_node_cfg = self.config.triangle_attention_starting_node
+ self.triangle_attention_starting_node = TriangleAttention(start_node_cfg.orientation,
+ start_node_cfg.num_head,
+ start_node_cfg.key_dim,
+ start_node_cfg.gating,
+ 64,
+ batch_size,
+ self.slice.triangle_attention_starting_node)
+ end_node_cfg = self.config.triangle_attention_ending_node
+ self.triangle_attention_ending_node = TriangleAttention(end_node_cfg.orientation,
+ end_node_cfg.num_head,
+ end_node_cfg.key_dim,
+ end_node_cfg.gating,
+ 64,
+ batch_size,
+ self.slice.triangle_attention_ending_node)
+        # The template pair channel dimension (64) is hard-coded in this stack.
+ self.pair_transition = Transition(self.config.pair_transition.num_intermediate_factor,
+ 64,
+ batch_size,
+ self.slice.pair_transition)
+
+ mul_outgoing_cfg = self.config.triangle_multiplication_outgoing
+ self.triangle_multiplication_outgoing = TriangleMultiplication(mul_outgoing_cfg.num_intermediate_channel,
+ mul_outgoing_cfg.equation,
+ layer_norm_dim=64,
+ batch_size=batch_size)
+ mul_incoming_cfg = self.config.triangle_multiplication_incoming
+ self.triangle_multiplication_incoming = TriangleMultiplication(mul_incoming_cfg.num_intermediate_channel,
+ mul_incoming_cfg.equation,
+ layer_norm_dim=64,
+ batch_size=batch_size)
+
+ def construct(self, pair_act, pair_mask, index=None):
+ if not self.num_block:
+ return pair_act
+
+ pair_act = pair_act + self.triangle_attention_starting_node(pair_act, pair_mask, index)
+ pair_act = pair_act + self.triangle_attention_ending_node(pair_act, pair_mask, index)
+ pair_act = pair_act + self.triangle_multiplication_outgoing(pair_act, pair_mask, index)
+ pair_act = pair_act + self.triangle_multiplication_incoming(pair_act, pair_mask, index)
+ pair_act = pair_act + self.pair_transition(pair_act, index)
+ return pair_act
+
+
+class SingleTemplateEmbedding(nn.Cell):
+ '''single template embedding'''
+
+ def __init__(self, config, mixed_precision):
+ super(SingleTemplateEmbedding, self).__init__()
+ self.config = config.template
+ if mixed_precision:
+ self._type = mstype.float16
+ else:
+ self._type = mstype.float32
+ self.num_bins = self.config.dgram_features.num_bins
+ self.min_bin = self.config.dgram_features.min_bin
+ self.max_bin = self.config.dgram_features.max_bin
+
+ self.num_channels = (self.config.template_pair_stack.triangle_attention_ending_node.value_dim)
+ self.embedding2d = nn.Dense(88, self.num_channels,
+ weight_init=lecun_init(88, initializer_name='relu'))
+        # Recompute is enabled for every template pair stack block.
+ template_layers = nn.CellList()
+ for _ in range(self.config.template_pair_stack.num_block):
+ template_pair_stack_block = TemplatePairStack(config)
+ template_pair_stack_block.recompute()
+ template_layers.append(template_pair_stack_block)
+ self.template_pair_stack = template_layers
+
+ self.one_hot = nn.OneHot(depth=22, axis=-1)
+ self.n, self.ca, self.c = [atom_order[a] for a in ('N', 'CA', 'C')]
+
+ self.use_template_unit_vector = self.config.use_template_unit_vector
+ layer_norm_dim = 64
+ self.output_layer_norm = nn.LayerNorm([layer_norm_dim,], epsilon=1e-5)
+ self.num_block = self.config.template_pair_stack.num_block
+ self.batch_block = 4
+
+ def construct(self, mask_2d, template_aatype, template_all_atom_masks, template_all_atom_positions,
+ template_pseudo_beta_mask, template_pseudo_beta):
+ '''construct'''
+ num_res = template_aatype[0, ...].shape[0]
+ template_mask_2d_temp = P.ExpandDims()(template_pseudo_beta_mask, -1) * \
+ P.ExpandDims()(template_pseudo_beta_mask, 1)
+ template_dgram_temp = dgram_from_positions(template_pseudo_beta, self.num_bins, self.min_bin,
+ self.max_bin, self._type)
+
+ to_concat_temp = (template_dgram_temp, P.ExpandDims()(template_mask_2d_temp, -1))
+ aatype_temp = self.one_hot(template_aatype)
+ aatype_temp = P.Cast()(aatype_temp, self._type)
+ to_concat_temp = to_concat_temp + (P.Tile()(P.ExpandDims()(aatype_temp, 1), (1, num_res, 1, 1)),
+ P.Tile()(P.ExpandDims()(aatype_temp, 2), (1, 1, num_res, 1)))
+
+ rot_temp, trans_temp = make_transform_from_reference(template_all_atom_positions[:, :, self.n],
+ template_all_atom_positions[:, :, self.ca],
+ template_all_atom_positions[:, :, self.c])
+
+ _, rotation_tmp, translation_tmp = quat_affine(None, trans_temp, rot_temp)
+ points_tmp = [P.ExpandDims()(translation_tmp[0], -2),
+ P.ExpandDims()(translation_tmp[1], -2),
+ P.ExpandDims()(translation_tmp[2], -2)]
+ affine_vec_tmp = invert_point(points_tmp, rotation_tmp, translation_tmp, extra_dims=1)
+ inv_distance_scalar_tmp = P.Rsqrt()(1e-6 + P.Square()(affine_vec_tmp[0]) + P.Square()(affine_vec_tmp[1]) + \
+ P.Square()(affine_vec_tmp[2]))
+ template_mask_tmp = (template_all_atom_masks[:, :, self.n] *
+ template_all_atom_masks[:, :, self.ca] *
+ template_all_atom_masks[:, :, self.c])
+ template_mask_2d_tmp = P.ExpandDims()(template_mask_tmp, -1) * P.ExpandDims()(template_mask_tmp, 1)
+
+ inv_distance_scalar_tmp = inv_distance_scalar_tmp * template_mask_2d_tmp
+ unit_vector_tmp = (P.ExpandDims()(inv_distance_scalar_tmp * affine_vec_tmp[0], -1),
+ P.ExpandDims()(inv_distance_scalar_tmp * affine_vec_tmp[1], -1),
+ P.ExpandDims()(inv_distance_scalar_tmp * affine_vec_tmp[2], -1))
+
+ if not self.use_template_unit_vector:
+ unit_vector_tmp = (P.ZerosLike()(unit_vector_tmp[0]), P.ZerosLike()(unit_vector_tmp[1]),
+ P.ZerosLike()(unit_vector_tmp[2]))
+ to_concat_temp = to_concat_temp + unit_vector_tmp + (P.ExpandDims()(template_mask_2d_tmp, -1),)
+ act_tmp = P.Concat(-1)(to_concat_temp)
+
+ act_tmp = act_tmp * P.ExpandDims()(template_mask_2d_tmp, -1)
+ act_tmp = self.embedding2d(act_tmp)
+
+ act_tmp = P.Split(0, self.batch_block)(act_tmp)
+ act = ()
+ for i in range(self.batch_block):
+ act = act + (P.Squeeze()(act_tmp[i]),)
+
+ output = []
+ for i in range(self.batch_block):
+ act_batch = act[i]
+ for j in range(self.num_block):
+ act_batch = self.template_pair_stack[j](act_batch, mask_2d)
+ slice_act = P.Reshape()(act_batch, ((1,) + P.Shape()(act_batch)))
+ output.append(slice_act)
+
+ act_tmp_loop = P.Concat()(output)
+ act_tmp = self.output_layer_norm(act_tmp_loop)
+ return act_tmp
+
+
+class TemplateEmbedding(nn.Cell):
+ '''template embedding'''
+
+ def __init__(self, config, mixed_precision=True):
+ super(TemplateEmbedding, self).__init__()
+ self.config = config.template
+ if mixed_precision:
+ self._type = mstype.float16
+ else:
+ self._type = mstype.float32
+ self.num_channels = (self.config.template_pair_stack.triangle_attention_ending_node.value_dim)
+ self.template_embedder = SingleTemplateEmbedding(config, mixed_precision)
+ self.template_pointwise_attention = Attention(self.config.attention.num_head,
+ self.config.attention.key_dim,
+ self.config.attention.gating,
+ q_data_dim=128, m_data_dim=64,
+ output_dim=128, batch_size=None)
+ self.slice_num = config.slice.template_embedding
+
+ def compute(self, flat_query, flat_templates, input_mask):
+ embedding = self.template_pointwise_attention(flat_query, flat_templates, input_mask, index=None,
+ nonbatched_bias=None)
+ return embedding
+
+ def construct(self, query_embedding, template_aatype, template_all_atom_masks, template_all_atom_positions,
+ template_mask, template_pseudo_beta_mask, template_pseudo_beta, mask_2d):
+ '''construct'''
+ num_templates = template_mask.shape[0]
+ num_channels = self.num_channels
+ num_res = query_embedding.shape[0]
+ query_num_channels = query_embedding.shape[-1]
+ mask_2d = F.depend(mask_2d, query_embedding)
+ template_pair_representation = self.template_embedder(mask_2d, template_aatype,
+ template_all_atom_masks, template_all_atom_positions,
+ template_pseudo_beta_mask,
+ template_pseudo_beta)
+ flat_query = P.Reshape()(query_embedding, (num_res * num_res, 1, query_num_channels))
+ flat_templates = P.Reshape()(
+ P.Transpose()(template_pair_representation, (1, 2, 0, 3)),
+ (num_res * num_res, num_templates, num_channels))
+ template_mask_bias = P.ExpandDims()(P.ExpandDims()(P.ExpandDims()(template_mask, 0), 1), 2) - 1.0
+ input_mask = 1e4 * template_mask_bias
+ batched_inputs = (flat_query, flat_templates)
+ nonbatched_inputs = (input_mask,)
+ embedding = _memory_reduce(self.compute, batched_inputs, nonbatched_inputs, self.slice_num)
+ embedding = P.Reshape()(embedding, (num_res, num_res, query_num_channels))
+ # No gradients if no templates.
+ embedding = embedding * (P.ReduceSum()(template_mask) > 0.)
+ return embedding
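+
+
+# Note (illustrative): `_memory_reduce` is used above to trade compute for memory when the
+# flattened N_res * N_res query axis is large. Conceptually it behaves like the sketch below;
+# the exact signature lives in mindsponge.common.utils and may differ:
+#
+#     def memory_reduce_sketch(compute, batched_inputs, nonbatched_inputs, slice_num):
+#         chunks = [np.array_split(x, slice_num, axis=0) for x in batched_inputs]
+#         outputs = [compute(*parts, *nonbatched_inputs) for parts in zip(*chunks)]
+#         return np.concatenate(outputs, axis=0)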
diff --git a/MindSPONGE/applications/research/FAAST/nmr_relax/model/structure_violation.py b/MindSPONGE/applications/research/FAAST/nmr_relax/model/structure_violation.py
new file mode 100644
index 0000000000000000000000000000000000000000..001c213b086de9d31e6e27774f9bef34a09f7fc5
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/nmr_relax/model/structure_violation.py
@@ -0,0 +1,585 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Modules and utilities for the structure module."""
+import numpy as np
+from mindsponge.common import residue_constants
+from nmr_relax.model import utils
+
+VIOLATION_TOLERANCE_ACTOR = 12.0
+CLASH_OVERLAP_TOLERANCE = 1.5
+
+# one hot encoding for C and N atoms (using atom14 representation)
+C_ONE_HOT = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+N_ONE_HOT = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+
+# Van der Waals radii for each atom
+ATOMTYPE_RADIUS = \
+ np.array([residue_constants.van_der_waals_radius.get(name[0]) for name in residue_constants.atom_types])
+DISTS_MASK_I = np.eye(14, 14)
+
+# lower bound and upper bound between each atoms used for clashes calculation
+LOWER_BOUND, UPPER_BOUND, _ = \
+ residue_constants.make_atom14_dists_bounds(overlap_tolerance=CLASH_OVERLAP_TOLERANCE,
+ bond_length_tolerance_factor=VIOLATION_TOLERANCE_ACTOR)
+CYS_SG_IDX = 5
+
+
+def within_residue_violations(
+ atom14_pred_positions,
+ atom14_atom_exists,
+ atom14_dists_lower_bound,
+ atom14_dists_upper_bound,
+ tighten_bounds_for_loss,
+ dists_mask_i
+):
+ """Loss to penalize steric clashes within residues.
+ This is a loss penalizing any steric violations or clashes of non-bonded atoms in a given peptide.
+
+ Args:
+ atom14_pred_positions (Tensor): predicted positions of atoms in global prediction frame.
+ shape :math:`(N_{res}, 14, 3)` .
+ atom14_atom_exists (Tensor): mask denoting whether atom at positions exists for given amino acid type.
+ shape :math:`(N_{res}, 14)` .
+        atom14_dists_lower_bound (Tensor): lower bound on allowed distances. shape :math:`(N_{res}, 14, 14)` .
+        atom14_dists_upper_bound (Tensor): upper bound on allowed distances. shape :math:`(N_{res}, 14, 14)` .
+        tighten_bounds_for_loss (float): Extra factor to tighten loss. Default: 0.0.
+        dists_mask_i (Tensor): initial distance mask, shape: :math:`(14, 14)` .
+
+ Returns:
+ - **per_atom_loss_sum** (Tensor) - sum of all clash losses per atom, shape :math:`(N_{res}, 14)` .
+ - **per_atom_violations** (Tensor) - violation per atom, shape :math:`(N_{res}, 14)` .
+
+ Symbol:
+ :math:`N_{res}`, number of amino acids.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> import numpy as np
+ >>> from mindsponge.metrics import within_residue_violations
+ >>> atom14_pred_positions = Tensor(np.random.random(size=(50, 14, 3)), ms.float32)
+ >>> atom14_atom_exists = Tensor(np.random.random(size=(50, 14)), ms.float32)
+ >>> atom14_dists_lower_bound = Tensor(np.random.random(size=(50, 14, 14)), ms.float32)
+ >>> atom14_dists_upper_bound = Tensor(np.random.random(size=(50, 14, 14)), ms.float32)
+ >>> tighten_bounds_for_loss = 0.0
+ >>> dists_mask_i = Tensor(np.eye(14, 14), ms.int32)
+ >>> per_atom_loss_sum, per_atom_violations = within_residue_violations(atom14_pred_positions,
+ ... atom14_atom_exists,
+ ... atom14_dists_lower_bound,
+ ... atom14_dists_upper_bound,
+ ... tighten_bounds_for_loss,
+ ... dists_mask_i)
+ >>> print(per_atom_loss_sum.shape, per_atom_violations.shape)
+ (50, 14) (50, 14)
+
+ """
+
+ dists_masks = (1. - dists_mask_i[None])
+ dists_masks = dists_masks * (atom14_atom_exists[:, :, None] * atom14_atom_exists[:, None, :])
+
+ dists = np.sqrt(1e-10 + np.sum(
+ np.square(atom14_pred_positions[:, :, None, :] - atom14_pred_positions[:, None, :, :]), axis=-1))
+ dists_to_low_error = np.maximum(0, atom14_dists_lower_bound + tighten_bounds_for_loss - dists)
+ dists_to_high_error = np.maximum(0, dists - (atom14_dists_upper_bound - tighten_bounds_for_loss))
+ loss = dists_masks * (dists_to_low_error + dists_to_high_error)
+ per_atom_loss_sum = np.sum(loss, axis=1) + np.sum(loss, axis=2)
+ lower = (dists < atom14_dists_lower_bound).astype(np.int32)
+ high = (dists > atom14_dists_upper_bound).astype(np.int32)
+ violations = dists_masks * ((lower + high).astype(bool))
+
+ per_atom_violations = np.maximum(np.max(violations, axis=1), np.max(violations, axis=2))
+
+ return per_atom_loss_sum, per_atom_violations
+
+
+def between_residue_clash(
+ atom14_pred_positions,
+ atom14_atom_exists,
+ atom14_atom_radius,
+ residue_index,
+ c_one_hot,
+ n_one_hot,
+ overlap_tolerance_soft,
+ overlap_tolerance_hard,
+ cys_sg_idx):
+ """
+ This is a loss penalizing any steric clashes due to non bonded atoms in different peptides coming too close.
+
+ Args:
+ atom14_pred_positions (Tensor): predicted positions of atoms in global prediction frame.
+ shape is :math:`(N_{res}, 14, 3)` .
+ atom14_atom_exists (Tensor): mask denoting whether atom at positions exists for given amino acid type.
+ shape is :math:`(N_{res}, 14)` .
+ atom14_atom_radius (Tensor): Van der Waals radius for each atom. shape is :math:`(N_{res}, 14)` .
+ residue_index (Tensor): Residue index for given amino acid. shape is :math:`(N_{res}, )` ,
+ range from 1 to :math:`N_{res}` .
+ c_one_hot (Tensor): one hot encoding for C atoms (using atom14 representation). shape is (14, ) .
+ n_one_hot (Tensor): one hot encoding for N atoms (using atom14 representation). shape is (14, ) .
+        overlap_tolerance_soft (float): soft tolerance factor. Default: 12.0.
+        overlap_tolerance_hard (float): hard tolerance factor. Default: 1.5.
+ cys_sg_idx (Tensor): CYS amino acid index. Default: 5.
+ see more at `mindsponge.common.residue_constants`. Shape: `()` .
+
+ Returns:
+ - Tensor, mean_loss, average clash loss. Shape is `()` .
+ - Tensor, per_atom_loss_sum, sum of all clash losses per atom, shape is :math:`(N_{res}, 14)` .
+ - Tensor, per_atom_clash_mask, mask whether atom clashes with any other atom,
+ shape is :math:`(N_{res}, 14)` .
+
+ Symbol:
+ :math:`N_{res}`, number of amino acids.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> import numpy as np
+ >>> from mindsponge.metrics import between_residue_clash
+ >>> atom14_pred_positions = Tensor(np.random.random(size=(50, 14, 3)), ms.float32)
+ >>> atom14_atom_exists = Tensor(np.random.randint(2, size=(50, 14)))
+ >>> atom14_atom_radius = Tensor(np.random.random(size=(50, 14)), ms.float32)
+ >>> residue_index = Tensor(np.array(range(50)), ms.int32)
+ >>> c_one_hot = Tensor(np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), ms.int32)
+ >>> n_one_hot = Tensor(np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), ms.int32)
+ >>> overlap_tolerance_soft = 12.0
+ >>> overlap_tolerance_hard = 1.5
+ >>> cys_sg_idx = Tensor(5, ms.int32)
+ >>> mean_loss, per_atom_loss_sum, per_atom_clash_mask = between_residue_clash(atom14_pred_positions,
+ ... atom14_atom_exists,
+ ... atom14_atom_radius,
+ ... residue_index,
+ ... c_one_hot,
+ ... n_one_hot,
+ ... overlap_tolerance_soft,
+ ... overlap_tolerance_hard,
+ ... cys_sg_idx)
+ >>> print(mean_loss.shape, per_atom_loss_sum.shape, per_atom_clash_mask.shape)
+        () (50, 14) (50, 14)
+
+ """
+
+ dists = np.sqrt(1e-10 + np.sum(
+ np.square(atom14_pred_positions[:, None, :, None, :] - atom14_pred_positions[None, :, None, :, :]), axis=-1))
+ dists_mask = atom14_atom_exists[:, None, :, None] * atom14_atom_exists[None, :, None, :]
+ dists_mask *= (residue_index[:, None, None, None] < residue_index[None, :, None, None])
+
+ # Backbone C--N bond between subsequent residues is no clash.
+ neighbour_mask = ((residue_index[:, None, None, None] + 1) == residue_index[None, :, None, None])
+ c_n_bonds = neighbour_mask * c_one_hot[None, None, :, None] * n_one_hot[None, None, None, :]
+ dists_mask *= (1. - c_n_bonds)
+
+ # Disulfide bridge between two cysteines is no clash.
+ cys_sg_one_hot = np.eye(14)[cys_sg_idx]
+ disulfide_bonds = (cys_sg_one_hot[None, None, :, None] * cys_sg_one_hot[None, None, None, :])
+ dists_mask *= (1. - disulfide_bonds)
+
+ dists_lower_bound = dists_mask * (atom14_atom_radius[:, None, :, None] + atom14_atom_radius[None, :, None, :])
+ dists_to_low_error = dists_mask * np.maximum(0, dists_lower_bound - overlap_tolerance_soft - dists)
+ mean_loss = np.sum(dists_to_low_error) / (1e-6 + np.sum(dists_mask))
+ per_atom_loss_sum = (dists_to_low_error.sum(axis=(0, 2)) + dists_to_low_error.sum(axis=(1, 3)))
+ clash_mask = dists_mask * (dists < (dists_lower_bound - overlap_tolerance_hard))
+ per_atom_clash_mask = np.maximum(clash_mask.max(axis=(0, 2)), clash_mask.max(axis=(1, 3)))
+
+ return mean_loss, per_atom_loss_sum, per_atom_clash_mask
+
+
+def between_residue_bond(
+ pred_atom_positions,
+ pred_atom_mask,
+ residue_index,
+ aatype,
+ tolerance_factor_soft=12.0,
+ tolerance_factor_hard=12.0
+):
+ """
+ Flat-bottom loss to penalize structural violations between residues. This is a loss penalizing any violation
+ of the geometry around the peptide bond between consecutive amino acids.
+
+ Args:
+ pred_atom_positions (Tensor): Atom positions in atom37/14 representation, shape :math:`(N_{res}, 37, 3)`.
+ or shape :math:`(N_{res}, 14, 3)` .
+ pred_atom_mask (Tensor): Atom mask in atom37/14 representation. shape :math:`(N_{res}, 37)` or
+ shape :math:`(N_{res}, 14)` .
+ residue_index (Tensor): Residue index for given amino acid, this is assumed to be monotonically
+ increasing. Range from 1 to :math:`N_{res}`. shape :math:`(N_{res}, )` .
+ aatype (Tensor): amino acid types. Range is :math:`[0,20]`. shape :math:`(N_{res}, )` .
+ tolerance_factor_soft (float): soft tolerance factor measured in standard deviations of pdb distributions.
+ Default: 12.0 .
+ tolerance_factor_hard (float): hard tolerance factor measured in standard deviations of pdb distributions.
+ Default: 12.0 .
+
+ Returns:
+ - Tensor, c_n_loss_mean, loss for peptide bond length violations. shape is :math:`( )` .
+ - Tensor, ca_c_n_loss_mean, loss for violations of bond angle around C spanned by CA, C, N.
+ shape is :math:`( )` .
+ - Tensor, c_n_ca_loss_mean, loss for violations of bond angle around N spanned by C, N, CA.
+ shape is :math:`( )` .
+ - Tensor, per_residue_loss_sum, sum of all losses of each residue. shape is :math:`(N_{res}, )` .
+ - Tensor, per_residue_violation_mask, mask denoting all residues with violation present.
+ shape is :math:`(N_{res}, )` .
+
+ Symbol:
+ :math:`N_{res}`, number of amino acids.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> import numpy as np
+ >>> from mindsponge.metrics import between_residue_bond
+ >>> np.random.seed(1)
+ >>> pred_atom_positions = Tensor(np.random.random(size=(50,37,3)), ms.float32)
+ >>> pred_atom_mask = Tensor(np.random.randint(2,size=(50,37)), ms.int32)
+ >>> residue_index = Tensor(np.array(range(50)), ms.int32)
+ >>> aatype = Tensor(np.random.randint(20, size=(50,)), ms.int32)
+ >>> tolerance_factor_soft = 12.0
+ >>> tolerance_factor_hard = 12.0
+        >>> result = between_residue_bond(pred_atom_positions, pred_atom_mask, residue_index, aatype,
+        ...                               tolerance_factor_soft, tolerance_factor_hard)
+        >>> for x in result:
+        ...     print(x)
+ 0.52967054
+ 0.6045412
+ 0.39251995
+ [0.62809587 1.6770853 1.7221183 1.0325309 1.3417522 1.79882
+ 1.7718308 1.5092779 1.5653987 1.9564128 1.6804926 1.6051245
+ 1.5033073 1.5895741 2.1686926 2.126039 1.3837843 1.2554975
+ 1.8135165 2.1593785 1.9408598 1.7281027 1.8666006 1.9623451
+ 1.8177024 1.7543832 1.5969353 1.2150483 0.9833115 1.219868
+ 1.7008476 1.6968286 1.7648234 1.5584714 1.370602 1.8525059
+ 1.7938454 1.5313196 1.6940074 1.8512855 1.8222975 1.6600168
+ 1.9163743 1.7201058 1.6288358 1.6055745 1.521946 1.6553445
+ 1.6175683 0.894606 ]
+ [1. 1. 0. 1. 1. 0. 0. 1. 1. 1. 1. 0. 0. 0. 0. 1. 1. 1. 1. 1. 0. 1. 1. 0.
+ 0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 1. 1. 1. 0.
+ 1. 1.]
+
+ """
+
+ # Get the positions of the relevant backbone atoms.
+ this_ca_pos = pred_atom_positions[:-1, 1, :]
+ this_ca_mask = pred_atom_mask[:-1, 1]
+ this_c_pos = pred_atom_positions[:-1, 2, :]
+ this_c_mask = pred_atom_mask[:-1, 2]
+ next_n_pos = pred_atom_positions[1:, 0, :]
+ next_n_mask = pred_atom_mask[1:, 0]
+ next_ca_pos = pred_atom_positions[1:, 1, :]
+ next_ca_mask = pred_atom_mask[1:, 1]
+ has_no_gap_mask = ((residue_index[1:] - residue_index[:-1]) == 1.0).astype(np.float32)
+
+ # Compute loss for the C--N bond.
+ c_n_bond_length = np.sqrt(1e-6 + np.sum(np.square(this_c_pos - next_n_pos), axis=-1))
+
+ # The C-N bond to proline has slightly different length because of the ring.
+ next_is_proline = (aatype[1:] == residue_constants.resname_to_idx['PRO']).astype(np.float32)
+ gt_length = ((1. - next_is_proline) * residue_constants.between_res_bond_length_c_n[0]
+ + next_is_proline * residue_constants.between_res_bond_length_c_n[1])
+ gt_stddev = ((1. - next_is_proline) * residue_constants.between_res_bond_length_stddev_c_n[0] +
+ next_is_proline * residue_constants.between_res_bond_length_stddev_c_n[1])
+ c_n_bond_length_error = np.sqrt(1e-6 + np.square(c_n_bond_length - gt_length))
+ c_n_loss_per_residue = np.maximum(0, c_n_bond_length_error - tolerance_factor_soft * gt_stddev)
+ mask = this_c_mask * next_n_mask * has_no_gap_mask
+ c_n_loss_mean = np.sum(mask * c_n_loss_per_residue) / (np.sum(mask) + 1e-6)
+ c_n_violation_mask = mask * (c_n_bond_length_error > (tolerance_factor_hard * gt_stddev))
+
+ # Compute loss for the angles.
+ ca_c_bond_length = np.sqrt(1e-6 + np.sum(np.square(this_ca_pos - this_c_pos), axis=-1))
+ n_ca_bond_length = np.sqrt(1e-6 + np.sum(np.square(next_n_pos - next_ca_pos), axis=-1))
+
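+    # Unit vectors from C to CA, from C to the next N, and from the next N to the next CA;
+    # their dot products below give the cosines of the CA-C-N and C-N-CA backbone angles.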
+ c_ca_unit_vec = (this_ca_pos - this_c_pos) / ca_c_bond_length[:, None]
+ c_n_unit_vec = (next_n_pos - this_c_pos) / c_n_bond_length[:, None]
+ n_ca_unit_vec = (next_ca_pos - next_n_pos) / n_ca_bond_length[:, None]
+
+ ca_c_n_cos_angle = np.sum(c_ca_unit_vec * c_n_unit_vec, axis=-1)
+ gt_angle = residue_constants.between_res_cos_angles_ca_c_n[0]
+ gt_stddev = residue_constants.between_res_cos_angles_ca_c_n[1]
+ ca_c_n_cos_angle_error = np.sqrt(1e-6 + np.square(ca_c_n_cos_angle - gt_angle))
+ ca_c_n_loss_per_residue = np.maximum(0, ca_c_n_cos_angle_error - tolerance_factor_soft * gt_stddev)
+ mask = this_ca_mask * this_c_mask * next_n_mask * has_no_gap_mask
+ ca_c_n_loss_mean = np.sum(mask * ca_c_n_loss_per_residue) / (np.sum(mask) + 1e-6)
+ ca_c_n_violation_mask = mask * (ca_c_n_cos_angle_error > (tolerance_factor_hard * gt_stddev))
+
+ c_n_ca_cos_angle = np.sum((-c_n_unit_vec) * n_ca_unit_vec, axis=-1)
+ gt_angle = residue_constants.between_res_cos_angles_c_n_ca[0]
+ gt_stddev = residue_constants.between_res_cos_angles_c_n_ca[1]
+ c_n_ca_cos_angle_error = np.sqrt(1e-6 + np.square(c_n_ca_cos_angle - gt_angle))
+ c_n_ca_loss_per_residue = np.maximum(0, c_n_ca_cos_angle_error - tolerance_factor_soft * gt_stddev)
+ mask = this_c_mask * next_n_mask * next_ca_mask * has_no_gap_mask
+ c_n_ca_loss_mean = np.sum(mask * c_n_ca_loss_per_residue) / (np.sum(mask) + 1e-6)
+ c_n_ca_violation_mask = mask * (c_n_ca_cos_angle_error > (tolerance_factor_hard * gt_stddev))
+
+ # Compute a per residue loss (equally distribute the loss to both neighbouring residues).
+ per_residue_loss_sum = c_n_loss_per_residue + ca_c_n_loss_per_residue + c_n_ca_loss_per_residue
+ per_residue_loss_sum = 0.5 * (np.pad(per_residue_loss_sum, [[0, 1]]) + np.pad(per_residue_loss_sum, [[1, 0]]))
+
+ # Compute hard violations.
+ per_residue_violation_mask = np.max(np.stack([c_n_violation_mask, ca_c_n_violation_mask, c_n_ca_violation_mask]),
+ axis=0)
+ per_residue_violation_mask = np.maximum(np.pad(per_residue_violation_mask, [[0, 1]]),
+ np.pad(per_residue_violation_mask, [[1, 0]]))
+
+ result = (c_n_loss_mean, ca_c_n_loss_mean, c_n_ca_loss_mean, per_residue_loss_sum, per_residue_violation_mask)
+ return result
+
+
+def get_structural_violations(atom14_atom_exists, residue_index, aatype, residx_atom14_to_atom37,
+ atom14_pred_positions, violation_tolerance_factor=VIOLATION_TOLERANCE_ACTOR,
+ clash_overlap_tolerance=CLASH_OVERLAP_TOLERANCE, lower_bound=LOWER_BOUND,
+ upper_bound=UPPER_BOUND, atomtype_radius=ATOMTYPE_RADIUS,
+ c_one_hot=C_ONE_HOT, n_one_hot=N_ONE_HOT, dists_mask_i=DISTS_MASK_I,
+ cys_sg_idx=CYS_SG_IDX):
+ """Computes several checks for structural violations.
+
+ Args:
+        atom14_atom_exists (Tensor): mask denoting whether the atom at each position exists for the given
+            amino acid type. shape :math:`(N_{res}, 14)` .
+        residue_index (Tensor): Residue index for the given amino acid, ranging from 0 to :math:`N_{res} - 1`.
+            Shape :math:`(N_{res}, )` .
+ aatype (Tensor): amino acid types. shape :math:`(N_{res}, )` . Range is :math:`[0,20]` .
+ residx_atom14_to_atom37 (Tensor): mapping for (residx, atom14) --> atom37. shape :math:`(N_{res}, 14)` .
+ atom14_pred_positions (Tensor): predicted positions of atoms in global prediction frame.
+ shape :math:`(N_{res}, 14, 3)` .
+        violation_tolerance_factor (float): tolerance factor, measured in standard deviations, for violations
+            between amino acids. Default: 12.0 .
+        clash_overlap_tolerance (float): clash overlap tolerance factor. Default: 1.5 .
+        lower_bound (Tensor): lower bound on allowed distances. shape :math:`(N_{res}, 14, 14)` .
+        upper_bound (Tensor): upper bound on allowed distances. shape :math:`(N_{res}, 14, 14)` .
+        atomtype_radius (Tensor): Van der Waals radius for each atom type. shape: :math:`(37, )` .
+ c_one_hot (Tensor): one hot encoding for C atoms (using atom14 representation).
+ shape: :math:`(14, )` .
+ n_one_hot (Tensor): one hot encoding for N atoms (using atom14 representation).
+ shape: :math:`(14, )` .
+        dists_mask_i (Tensor): initial distance mask, shape: :math:`(14, 14)` .
+        cys_sg_idx (Tensor): index of the cysteine SG atom in the atom14 representation. Default: 5 .
+ see more at `mindsponge.common.residue_constants`.
+
+ Returns:
+ - bonds_c_n_loss_mean (Tensor), loss for peptide bond length violations. shape is :math:`()`.
+ - angles_ca_c_n_loss_mean (Tensor), loss for violations of bond angle around C spanned by CA, C, N.
+ Shape is :math:`()`.
+ - angles_c_n_ca_loss_mean (Tensor), loss for violations of bond angle around N spanned by C, N, CA.
+ Shape is :math:`()`.
+ - connections_per_residue_loss_sum (Tensor), sum of all losses of each residue. shape is :math:`(N_{res}, )` .
+ - connections_per_residue_violation_mask (Tensor), mask denoting all residues with violation present.
+ shape is :math:`(N_{res}, )` .
+ - clashes_mean_loss (Tensor), average clash loss. shape: :math:`()` .
+ - clashes_per_atom_loss_sum (Tensor), sum of all clash losses per atom, shape :math:`(N_{res}, 14)` .
+ - clashes_per_atom_clash_mask (Tensor), mask whether atom clashes with any other atom.
+ shape :math:`(N_{res}, 14)` .
+ - per_atom_loss_sum (Tensor), sum of all clash losses per atom, shape :math:`(N_{res}, 14)` .
+ - per_atom_violations (Tensor), violation per atom, shape :math:`(N_{res}, 14)` .
+ - total_per_residue_violations_mask (Tensor), violation masks for all residues, shape :math:`(N_{res}, )` .
+ - structure_violation_loss (Tensor), total violations for all amino acids. shape is :math:`()` .
+
+ Symbol:
+ :math:`N_{res}`, number of amino acids.
+
+ Supported Platforms:
+ ``Ascend`` ``GPU``
+
+ Examples:
+ >>> import mindspore as ms
+ >>> from mindspore import Tensor
+ >>> import numpy as np
+ >>> from mindsponge.metrics import get_structural_violations
+ >>> atom14_atom_exists = Tensor(np.random.random(size=(50, 14)), ms.float32)
+ >>> residue_index = Tensor(np.array(range(50)), ms.int32)
+ >>> aatype = Tensor(np.random.randint(20, size=(50,)), ms.int32)
+ >>> residx_atom14_to_atom37 = Tensor(np.random.randint(2, size=(50, 14)), ms.int32)
+ >>> atom14_pred_positions = Tensor(np.random.random(size=(50, 14, 3)), ms.float32)
+ >>> violation_tolerance_factor = 12.0
+ >>> clash_overlap_tolerance = 1.5
+ >>> lower_bound = Tensor(np.random.random(size=(50, 14, 14)), ms.float32)
+ >>> upper_bound = Tensor(np.random.random(size=(50, 14, 14)), ms.float32)
+        >>> atomtype_radius = Tensor([1.55, 1.7, 1.7, 1.7, 1.52, 1.7, 1.7, 1.7, 1.52, 1.52, 1.8,
+ ... 1.7, 1.7, 1.7, 1.55, 1.55, 1.52, 1.52, 1.8, 1.7, 1.7, 1.7,
+ ... 1.7, 1.55, 1.55, 1.55, 1.52, 1.52, 1.7, 1.55, 1.55, 1.52, 1.7,
+ ... 1.7, 1.7, 1.55, 1.52], ms.float32)
+ >>> c_one_hot = Tensor(np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), ms.int32)
+ >>> n_one_hot = Tensor(np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), ms.int32)
+ >>> dists_mask_i = Tensor(np.eye(14, 14), ms.int32)
+ >>> cys_sg_idx = Tensor(5, ms.int32)
+ >>> result = get_structural_violations(atom14_atom_exists, residue_index, aatype, residx_atom14_to_atom37,
+ ... atom14_pred_positions, violation_tolerance_factor,
+ ... clash_overlap_tolerance, lower_bound, upper_bound, atomtype_radius,
+        ...                                     c_one_hot, n_one_hot, dists_mask_i, cys_sg_idx)
+        >>> for r in result:
+        ...     print(r.shape)
+ ()
+ ()
+ ()
+ (50,)
+ (50,)
+ ()
+ (50, 14)
+ (50, 14)
+ (50, 14)
+ (50, 14)
+ (50,)
+ ()
+
+ """
+
+ # Compute between residue backbone violations of bonds and angles.
+ result = \
+ between_residue_bond(
+ pred_atom_positions=atom14_pred_positions,
+ pred_atom_mask=atom14_atom_exists.astype(np.float32),
+ residue_index=residue_index.astype(np.float32),
+ aatype=aatype,
+ tolerance_factor_soft=violation_tolerance_factor,
+ tolerance_factor_hard=violation_tolerance_factor)
+ c_n_loss_mean, ca_c_n_loss_mean, c_n_ca_loss_mean, per_residue_loss_sum, per_residue_violation_mask = result
+ # Compute the Van der Waals radius for every atom (the first letter of the atom name is the element type).
+ # Shape: (N, 14).
+ atom14_atom_radius = atom14_atom_exists * utils.batched_gather(
+ atomtype_radius, residx_atom14_to_atom37)
+
+ # Compute the between residue clash loss.
+ mean_loss, clashes_per_atom_loss_sum, per_atom_clash_mask = between_residue_clash(
+ atom14_pred_positions=atom14_pred_positions,
+ atom14_atom_exists=atom14_atom_exists,
+ atom14_atom_radius=atom14_atom_radius,
+ residue_index=residue_index,
+ c_one_hot=c_one_hot,
+ n_one_hot=n_one_hot,
+ overlap_tolerance_soft=clash_overlap_tolerance,
+ overlap_tolerance_hard=clash_overlap_tolerance,
+ cys_sg_idx=cys_sg_idx
+ )
+
+ # Compute all within-residue violations (clashes,
+ # bond length and angle violations).
+ atom14_dists_lower_bound = utils.batched_gather(lower_bound, aatype)
+ atom14_dists_upper_bound = utils.batched_gather(upper_bound, aatype)
+ per_atom_loss_sum, per_atom_violations = within_residue_violations(
+ atom14_pred_positions=atom14_pred_positions,
+ atom14_atom_exists=atom14_atom_exists,
+ atom14_dists_lower_bound=atom14_dists_lower_bound,
+ atom14_dists_upper_bound=atom14_dists_upper_bound,
+ tighten_bounds_for_loss=0.0,
+ dists_mask_i=dists_mask_i)
+
+ # Combine them to a single per-residue violation mask (used later for LDDT).
+ per_residue_violations_mask = np.max(np.stack([per_residue_violation_mask, np.max(per_atom_clash_mask, axis=-1),
+ np.max(per_atom_violations, axis=-1)]), axis=0)
+    bonds_c_n_loss_mean = c_n_loss_mean
+    angles_ca_c_n_loss_mean = ca_c_n_loss_mean
+    angles_c_n_ca_loss_mean = c_n_ca_loss_mean
+    connections_per_residue_loss_sum = per_residue_loss_sum
+    connections_per_residue_violation_mask = per_residue_violation_mask
+    clashes_mean_loss = mean_loss
+    clashes_per_atom_clash_mask = per_atom_clash_mask
+    total_per_residue_violations_mask = per_residue_violations_mask
+ return {
+ 'between_residues': {
+ 'bonds_c_n_loss_mean':
+ bonds_c_n_loss_mean, # ()
+ 'angles_ca_c_n_loss_mean':
+ angles_ca_c_n_loss_mean, # ()
+ 'angles_c_n_ca_loss_mean':
+ angles_c_n_ca_loss_mean, # ()
+ 'connections_per_residue_loss_sum':
+ connections_per_residue_loss_sum, # (N)
+ 'connections_per_residue_violation_mask':
+ connections_per_residue_violation_mask, # (N)
+ 'clashes_mean_loss':
+ clashes_mean_loss, # ()
+ 'clashes_per_atom_loss_sum':
+ clashes_per_atom_loss_sum, # (N, 14)
+ 'clashes_per_atom_clash_mask':
+ clashes_per_atom_clash_mask, # (N, 14)
+ },
+ 'within_residues': {
+ 'per_atom_loss_sum':
+ per_atom_loss_sum, # (N, 14)
+ 'per_atom_violations':
+ per_atom_violations, # (N, 14),
+ },
+ 'total_per_residue_violations_mask':
+ total_per_residue_violations_mask, # (N)
+ }
+
+
+def extreme_ca_ca_distance_violations(
+ pred_atom_positions, # (N, 37(14), 3)
+ pred_atom_mask, # (N, 37(14))
+ residue_index, # (N)
+ max_angstrom_tolerance=1.5
+):
+ """Counts residues whose Ca is a large distance from its neighbor.
+
+    Measures the fraction of CA-CA pairs between consecutive amino acids whose
+    distance exceeds the ideal CA-CA distance by more than `max_angstrom_tolerance`.
+
+ Args:
+ pred_atom_positions: Atom positions in atom37/14 representation
+ pred_atom_mask: Atom mask in atom37/14 representation
+ residue_index: Residue index for given amino acid, this is assumed to be
+ monotonically increasing.
+      max_angstrom_tolerance: Maximum deviation from the ideal CA-CA distance
+        that is not counted as a violation.
+ Returns:
+ Fraction of consecutive CA-CA pairs with violation.
+ """
+ this_ca_pos = pred_atom_positions[:-1, 1, :] # (N - 1, 3)
+ this_ca_mask = pred_atom_mask[:-1, 1] # (N - 1)
+ next_ca_pos = pred_atom_positions[1:, 1, :] # (N - 1, 3)
+ next_ca_mask = pred_atom_mask[1:, 1] # (N - 1)
+ has_no_gap_mask = ((residue_index[1:] - residue_index[:-1]) == 1.0).astype(
+ np.float32)
+ ca_ca_distance = np.sqrt(
+ 1e-6 + np.sum(np.square(this_ca_pos - next_ca_pos), axis=-1))
+ violations = (ca_ca_distance -
+ residue_constants.ca_ca) > max_angstrom_tolerance
+ mask = this_ca_mask * next_ca_mask * has_no_gap_mask
+ return utils.mask_mean(mask=mask, value=violations)
+
+
+def compute_violation_metrics(
+ batch,
+ atom14_pred_positions, # (N, 14, 3)
+ violations,
+):
+ """Compute several metrics to assess the structural violations."""
+
+ ret = {}
+ extreme_ca_ca_violations = extreme_ca_ca_distance_violations(
+ pred_atom_positions=atom14_pred_positions,
+ pred_atom_mask=batch['atom14_atom_exists'].astype(np.float32),
+ residue_index=batch['residue_index'].astype(np.float32))
+ ret['violations_extreme_ca_ca_distance'] = extreme_ca_ca_violations
+ ret['violations_between_residue_bond'] = utils.mask_mean(
+ mask=batch['seq_mask'],
+ value=violations['between_residues'][
+ 'connections_per_residue_violation_mask'])
+ ret['violations_between_residue_clash'] = utils.mask_mean(
+ mask=batch['seq_mask'],
+ value=np.max(violations['between_residues']['clashes_per_atom_clash_mask'], axis=-1))
+ ret['violations_within_residue'] = utils.mask_mean(
+ mask=batch['seq_mask'],
+ value=np.max(violations['within_residues']['per_atom_violations'], axis=-1))
+ ret['violations_per_residue'] = utils.mask_mean(
+ mask=batch['seq_mask'],
+ value=violations['total_per_residue_violations_mask'])
+ return ret
diff --git a/MindSPONGE/applications/research/FAAST/nmr_relax/model/utils.py b/MindSPONGE/applications/research/FAAST/nmr_relax/model/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..82e2c48125d1761651cb9da2533fa9ce750f1a2f
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/nmr_relax/model/utils.py
@@ -0,0 +1,52 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+'''Utility functions for structure violation computation: batched gather and masked mean.'''
+import collections.abc
+import numbers
+import numpy as np
+
+
+def batched_gather(params, indices, axis=0):
+ """batched_gather."""
+ take_fn = lambda p, i: np.take(p, i, axis=axis)
+ return take_fn(params, indices)
+
+
+def mask_mean(mask, value, axis=None, drop_mask_channel=False, eps=1e-10):
+ """Masked mean."""
+ if drop_mask_channel:
+ mask = mask[..., 0]
+
+ mask_shape = mask.shape
+ value_shape = value.shape
+
+ assert len(mask_shape) == len(value_shape)
+
+ if isinstance(axis, numbers.Integral):
+ axis = [axis]
+ elif axis is None:
+ axis = list(range(len(mask_shape)))
+    assert isinstance(axis, collections.abc.Iterable), (
+ 'axis needs to be either an iterable, integer or "None"')
+
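+    # If the mask is broadcast along an axis (size 1), scale the denominator by the
+    # number of elements that axis spans in `value`.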
+ broadcast_factor = 1.
+ for axis_ in axis:
+ value_size = value_shape[axis_]
+ mask_size = mask_shape[axis_]
+ if mask_size == 1:
+ broadcast_factor *= value_size
+ else:
+ assert mask_size == value_size
+ return (mask * value).sum(axis=tuple(axis)) / (mask.sum(axis=tuple(axis)) * broadcast_factor + eps)
diff --git a/MindSPONGE/applications/research/FAAST/nmr_relax/relax/amber_minimize.py b/MindSPONGE/applications/research/FAAST/nmr_relax/relax/amber_minimize.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4ee1d62faec0797fd9f61a421b9f10a761c344a
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/nmr_relax/relax/amber_minimize.py
@@ -0,0 +1,542 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Restrained Amber Minimization of a structure."""
+import io
+import time
+from typing import Collection, Optional, Sequence
+
+from absl import logging
+import numpy as np
+from simtk import openmm
+from simtk import unit
+from simtk.openmm import app as openmm_app
+from simtk.openmm.app import internal
+
+from mindsponge.common import protein, residue_constants
+from nmr_relax.model import structure_violation
+from nmr_relax.relax import cleanup
+from nmr_relax.relax import utils
+
+ENERGY = unit.kilocalories_per_mole
+LENGTH = unit.angstroms
+
+
+def will_restrain(atom: openmm_app.Atom, rset: str) -> bool:
+ """Returns True if the atom will be restrained by the given restraint set."""
+
+ if rset == "non_hydrogen":
+ return atom.element.name != "hydrogen"
+ elif rset == "c_alpha":
+ return atom.name == "CA"
+ else:
+ return False
+
+
+def _add_restraints(
+ system: openmm.System,
+ reference_pdb: openmm_app.PDBFile,
+ stiffness: unit.Unit,
+ rset: str,
+ exclude_residues: Sequence[int]):
+ """Adds a harmonic potential that restrains the end-to-end distance."""
+ assert rset in ["non_hydrogen", "c_alpha"]
+
+ force = openmm.CustomExternalForce(
+ "0.5 * k * ((x-x0)^2 + (y-y0)^2 + (z-z0)^2)")
+ force.addGlobalParameter("k", stiffness)
+ for p in ["x0", "y0", "z0"]:
+ force.addPerParticleParameter(p)
+
+ for i, atom in enumerate(reference_pdb.topology.atoms()):
+ if atom.residue.index in exclude_residues:
+ continue
+ if will_restrain(atom, rset):
+ force.addParticle(i, reference_pdb.positions[i])
+ logging.info("Restraining %d / %d particles.",
+ force.getNumParticles(), system.getNumParticles())
+ system.addForce(force)
+
+
+def _openmm_minimize(
+ pdb_str: str,
+ max_iterations: int,
+ tolerance: unit.Unit,
+ stiffness: unit.Unit,
+ restraint_set: str,
+ exclude_residues: Sequence[int]):
+ """Minimize energy via openmm."""
+
+ pdb_file = io.StringIO(pdb_str)
+ pdb = openmm_app.PDBFile(pdb_file)
+
+ force_field = openmm_app.ForceField("amber99sb.xml")
+ constraints = openmm_app.HBonds
+ system = force_field.createSystem(
+ pdb.topology, constraints=constraints)
+ if stiffness > 0 * ENERGY / (LENGTH ** 2):
+ _add_restraints(system, pdb, stiffness, restraint_set, exclude_residues)
+
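+    # Only energy minimization is performed, so the Langevin integrator settings
+    # (temperature, friction, step size) have no effect on the result.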
+ integrator = openmm.LangevinIntegrator(0, 0.01, 0.0)
+ platform = openmm.Platform.getPlatformByName("CPU")
+ simulation = openmm_app.Simulation(
+ pdb.topology, system, integrator, platform)
+ simulation.context.setPositions(pdb.positions)
+
+ ret = {}
+ state = simulation.context.getState(getEnergy=True, getPositions=True)
+ ret["einit"] = state.getPotentialEnergy().value_in_unit(ENERGY)
+ ret["posinit"] = state.getPositions(asNumpy=True).value_in_unit(LENGTH)
+ simulation.minimizeEnergy(maxIterations=max_iterations,
+ tolerance=tolerance)
+ state = simulation.context.getState(getEnergy=True, getPositions=True)
+ ret["efinal"] = state.getPotentialEnergy().value_in_unit(ENERGY)
+ ret["pos"] = state.getPositions(asNumpy=True).value_in_unit(LENGTH)
+ ret["min_pdb"] = _get_pdb_string(simulation.topology, state.getPositions())
+ return ret
+
+
+def _get_pdb_string(topology: openmm_app.Topology, positions: unit.Quantity):
+ """Returns a pdb string provided OpenMM topology and positions."""
+ with io.StringIO() as f:
+ openmm_app.PDBFile.writeFile(topology, positions, f)
+ return f.getvalue()
+
+
+def _check_cleaned_atoms(pdb_cleaned_string: str, pdb_ref_string: str):
+ """Checks that no atom positions have been altered by cleaning."""
+ cleaned = openmm_app.PDBFile(io.StringIO(pdb_cleaned_string))
+ reference = openmm_app.PDBFile(io.StringIO(pdb_ref_string))
+
+ cl_xyz = np.array(cleaned.getPositions().value_in_unit(LENGTH))
+ ref_xyz = np.array(reference.getPositions().value_in_unit(LENGTH))
+
+ for ref_res, cl_res in zip(reference.topology.residues(),
+ cleaned.topology.residues()):
+ assert ref_res.name == cl_res.name
+ for rat in ref_res.atoms():
+ for cat in cl_res.atoms():
+ if cat.name == rat.name:
+ if not np.array_equal(cl_xyz[cat.index], ref_xyz[rat.index]):
+ raise ValueError(f"Coordinates of cleaned atom {cat} do not match "
+ f"coordinates of reference atom {rat}.")
+
+
+def _check_residues_are_well_defined(prot: protein.Protein):
+ """Checks that all residues contain non-empty atom sets."""
+ if (prot.atom_mask.sum(axis=-1) == 0).any():
+ raise ValueError("Amber minimization can only be performed on proteins with"
+ " well-defined residues. This protein contains at least"
+ " one residue with no atoms.")
+
+
+def _check_atom_mask_is_ideal(prot):
+ """Sanity-check the atom mask is ideal, up to a possible OXT."""
+ atom_mask = prot.atom_mask
+ ideal_atom_mask = protein.ideal_atom_mask(prot)
+ utils.assert_equal_nonterminal_atom_types(atom_mask, ideal_atom_mask)
+
+
+def clean_protein(
+ prot: protein.Protein,
+ checks: bool = True):
+ """Adds missing atoms to Protein instance.
+
+ Args:
+ prot: A `protein.Protein` instance.
+ checks: A `bool` specifying whether to add additional checks to the cleaning
+ process.
+
+ Returns:
+ pdb_string: A string of the cleaned protein.
+ """
+ _check_atom_mask_is_ideal(prot)
+
+ # Clean pdb.
+ prot_pdb_string = protein.to_pdb(prot)
+ pdb_file = io.StringIO(prot_pdb_string)
+ alterations_info = {}
+ fixed_pdb = cleanup.fix_pdb(pdb_file, alterations_info)
+ fixed_pdb_file = io.StringIO(fixed_pdb)
+ pdb_structure = internal.pdbstructure.PdbStructure(fixed_pdb_file)
+ cleanup.clean_structure(pdb_structure, alterations_info)
+
+ logging.info("alterations info: %s", alterations_info)
+
+ # Write pdb file of cleaned structure.
+ as_file = openmm_app.PDBFile(pdb_structure)
+ pdb_string = _get_pdb_string(as_file.getTopology(), as_file.getPositions())
+ if checks:
+ _check_cleaned_atoms(pdb_string, prot_pdb_string)
+ return pdb_string
+
+
+def make_atom14_positions(prot):
+ """Constructs denser atom positions (14 dimensions instead of 37)."""
+ restype_atom14_to_atom37 = [] # mapping (restype, atom14) --> atom37
+ restype_atom37_to_atom14 = [] # mapping (restype, atom37) --> atom14
+ restype_atom14_mask = []
+
+ for rt in residue_constants.restypes:
+ atom_names = residue_constants.restype_name_to_atom14_names[
+ residue_constants.restype_1to3[rt]]
+
+ restype_atom14_to_atom37.append([
+ (residue_constants.atom_order[name] if name else 0)
+ for name in atom_names
+ ])
+
+ atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
+ restype_atom37_to_atom14.append([
+ (atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0)
+ for name in residue_constants.atom_types
+ ])
+
+ restype_atom14_mask.append([(1. if name else 0.) for name in atom_names])
+
+ # Add dummy mapping for restype 'UNK'.
+ restype_atom14_to_atom37.append([0] * 14)
+ restype_atom37_to_atom14.append([0] * 37)
+ restype_atom14_mask.append([0.] * 14)
+
+ restype_atom14_to_atom37 = np.array(restype_atom14_to_atom37, dtype=np.int32)
+ restype_atom37_to_atom14 = np.array(restype_atom37_to_atom14, dtype=np.int32)
+ restype_atom14_mask = np.array(restype_atom14_mask, dtype=np.float32)
+
+ # Create the mapping for (residx, atom14) --> atom37, i.e. an array
+ # with shape (num_res, 14) containing the atom37 indices for this protein.
+ residx_atom14_to_atom37 = restype_atom14_to_atom37[prot["aatype"]]
+ residx_atom14_mask = restype_atom14_mask[prot["aatype"]]
+
+ # Create a mask for known ground truth positions.
+ residx_atom14_gt_mask = residx_atom14_mask * np.take_along_axis(
+ prot["all_atom_mask"], residx_atom14_to_atom37, axis=1).astype(np.float32)
+
+ # Gather the ground truth positions.
+ residx_atom14_gt_positions = residx_atom14_gt_mask[:, :, None] * (
+ np.take_along_axis(prot["all_atom_positions"],
+ residx_atom14_to_atom37[..., None],
+ axis=1))
+
+ prot["atom14_atom_exists"] = residx_atom14_mask
+ prot["atom14_gt_exists"] = residx_atom14_gt_mask
+ prot["atom14_gt_positions"] = residx_atom14_gt_positions
+
+ prot["residx_atom14_to_atom37"] = residx_atom14_to_atom37
+
+ # Create the gather indices for mapping back.
+ residx_atom37_to_atom14 = restype_atom37_to_atom14[prot["aatype"]]
+ prot["residx_atom37_to_atom14"] = residx_atom37_to_atom14
+
+ # Create the corresponding mask.
+ restype_atom37_mask = np.zeros([21, 37], dtype=np.float32)
+ for restype, restype_letter in enumerate(residue_constants.restypes):
+ restype_name = residue_constants.restype_1to3[restype_letter]
+ atom_names = residue_constants.residue_atoms[restype_name]
+ for atom_name in atom_names:
+ atom_type = residue_constants.atom_order[atom_name]
+ restype_atom37_mask[restype, atom_type] = 1
+
+ residx_atom37_mask = restype_atom37_mask[prot["aatype"]]
+ prot["atom37_atom_exists"] = residx_atom37_mask
+
+ # As the atom naming is ambiguous for 7 of the 20 amino acids, provide
+ # alternative ground truth coordinates where the naming is swapped
+ restype_3 = [
+ residue_constants.restype_1to3[res] for res in residue_constants.restypes
+ ]
+ restype_3 += ["UNK"]
+
+ # Matrices for renaming ambiguous atoms.
+ all_matrices = {res: np.eye(14, dtype=np.float32) for res in restype_3}
+ for resname, swap in residue_constants.residue_atom_renaming_swaps.items():
+ correspondences = np.arange(14)
+ for source_atom_swap, target_atom_swap in swap.items():
+ source_index = residue_constants.restype_name_to_atom14_names[
+ resname].index(source_atom_swap)
+ target_index = residue_constants.restype_name_to_atom14_names[
+ resname].index(target_atom_swap)
+ correspondences[source_index] = target_index
+ correspondences[target_index] = source_index
+ renaming_matrix = np.zeros((14, 14), dtype=np.float32)
+ for index, correspondence in enumerate(correspondences):
+ renaming_matrix[index, correspondence] = 1.
+ all_matrices[resname] = renaming_matrix.astype(np.float32)
+ renaming_matrices = np.stack([all_matrices[restype] for restype in restype_3])
+
+ # Pick the transformation matrices for the given residue sequence
+ # shape (num_res, 14, 14).
+ renaming_transform = renaming_matrices[prot["aatype"]]
+
+ # Apply it to the ground truth positions. shape (num_res, 14, 3).
+ alternative_gt_positions = np.einsum("rac,rab->rbc",
+ residx_atom14_gt_positions,
+ renaming_transform)
+ prot["atom14_alt_gt_positions"] = alternative_gt_positions
+
+ # Create the mask for the alternative ground truth (differs from the
+ # ground truth mask, if only one of the atoms in an ambiguous pair has a
+ # ground truth position).
+ alternative_gt_mask = np.einsum("ra,rab->rb",
+ residx_atom14_gt_mask,
+ renaming_transform)
+
+ prot["atom14_alt_gt_exists"] = alternative_gt_mask
+
+ # Create an ambiguous atoms mask. shape: (21, 14).
+ restype_atom14_is_ambiguous = np.zeros((21, 14), dtype=np.float32)
+ for resname, swap in residue_constants.residue_atom_renaming_swaps.items():
+ for atom_name1, atom_name2 in swap.items():
+ restype = residue_constants.restype_order[
+ residue_constants.restype_3to1[resname]]
+ atom_idx1 = residue_constants.restype_name_to_atom14_names[resname].index(
+ atom_name1)
+ atom_idx2 = residue_constants.restype_name_to_atom14_names[resname].index(
+ atom_name2)
+ restype_atom14_is_ambiguous[restype, atom_idx1] = 1
+ restype_atom14_is_ambiguous[restype, atom_idx2] = 1
+
+ # From this create an ambiguous_mask for the given sequence.
+ prot["atom14_atom_is_ambiguous"] = (
+ restype_atom14_is_ambiguous[prot["aatype"]])
+
+ return prot
+
+
+def find_violations(prot_np: protein.Protein):
+ """Analyzes a protein and returns structural violation information.
+
+ Args:
+ prot_np: A protein.
+
+ Returns:
+ violations: A `dict` of structure components with structural violations.
+ violation_metrics: A `dict` of violation metrics.
+ """
+ batch = {
+ "aatype": prot_np.aatype,
+ "all_atom_positions": prot_np.atom_positions.astype(np.float32),
+ "all_atom_mask": prot_np.atom_mask.astype(np.float32),
+ "residue_index": prot_np.residue_index,
+ }
+
+ batch["seq_mask"] = np.ones_like(batch.get("aatype"), np.float32)
+ batch = make_atom14_positions(batch)
+
+ violations = structure_violation.get_structural_violations(
+ atom14_atom_exists=batch.get("atom14_atom_exists"),
+ residue_index=batch.get("residue_index"),
+ aatype=batch.get("aatype"),
+ residx_atom14_to_atom37=batch.get("residx_atom14_to_atom37"),
+ atom14_pred_positions=batch.get("atom14_gt_positions"),
+ )
+ violation_metrics = structure_violation.compute_violation_metrics(
+ batch=batch,
+ atom14_pred_positions=batch.get("atom14_gt_positions"),
+ violations=violations,
+ )
+
+ return violations, violation_metrics
+
+
+def get_violation_metrics(prot: protein.Protein):
+ """Computes violation and alignment metrics."""
+ structural_violations, struct_metrics = find_violations(prot)
+ violation_idx = np.flatnonzero(structural_violations.get("total_per_residue_violations_mask"))
+
+ struct_metrics["residue_violations"] = violation_idx
+ struct_metrics["num_residue_violations"] = len(violation_idx)
+ struct_metrics["structural_violations"] = structural_violations
+ return struct_metrics
+
+
+def _run_one_iteration(
+ *,
+ pdb_string: str,
+ max_iterations: int,
+ tolerance: float,
+ stiffness: float,
+ restraint_set: str,
+ max_attempts: int,
+ exclude_residues: Optional[Collection[int]] = None):
+ """Runs the minimization pipeline.
+
+ Args:
+ pdb_string: A pdb string.
+ max_iterations: An `int` specifying the maximum number of L-BFGS iterations.
+ A value of 0 specifies no limit.
+ tolerance: kcal/mol, the energy tolerance of L-BFGS.
+ stiffness: kcal/mol A**2, spring constant of heavy atom restraining
+ potential.
+ restraint_set: The set of atoms to restrain.
+ max_attempts: The maximum number of minimization attempts.
+ exclude_residues: An optional list of zero-indexed residues to exclude from
+ restraints.
+
+ Returns:
+ A `dict` of minimization info.
+ """
+ exclude_residues = exclude_residues or []
+
+ # Assign physical dimensions.
+ tolerance = tolerance * ENERGY
+ stiffness = stiffness * ENERGY / (LENGTH ** 2)
+
+ start = time.time()
+ minimized = False
+ attempts = 0
+ while not minimized and attempts < max_attempts:
+ attempts += 1
+ try:
+ logging.info("Minimizing protein, attempt %d of %d.",
+ attempts, max_attempts)
+ ret = _openmm_minimize(
+ pdb_string, max_iterations=max_iterations,
+ tolerance=tolerance, stiffness=stiffness,
+ restraint_set=restraint_set,
+ exclude_residues=exclude_residues)
+ minimized = True
+ except Exception as e: # pylint: disable=broad-except
+ logging.info(e)
+ if not minimized:
+ raise ValueError(f"Minimization failed after {max_attempts} attempts.")
+ ret["opt_time"] = time.time() - start
+ ret["min_attempts"] = attempts
+ return ret
+
+
+def run_pipeline(
+ prot: protein.Protein,
+ stiffness: float,
+ max_outer_iterations: int = 1,
+ place_hydrogens_every_iteration: bool = True,
+ max_iterations: int = 0,
+ tolerance: float = 2.39,
+ restraint_set: str = "non_hydrogen",
+ max_attempts: int = 100,
+ checks: bool = True,
+ exclude_residues: Optional[Sequence[int]] = None):
+ """Run iterative amber relax.
+
+ Successive relax iterations are performed until all violations have been
+ resolved. Each iteration involves a restrained Amber minimization, with
+ restraint exclusions determined by violation-participating residues.
+
+ Args:
+ prot: A protein to be relaxed.
+ stiffness: kcal/mol A**2, the restraint stiffness.
+ max_outer_iterations: The maximum number of iterative minimization.
+ place_hydrogens_every_iteration: Whether hydrogens are re-initialized
+ prior to every minimization.
+ max_iterations: An `int` specifying the maximum number of L-BFGS steps
+ per relax iteration. A value of 0 specifies no limit.
+ tolerance: kcal/mol, the energy tolerance of L-BFGS.
+ The default value is the OpenMM default.
+ restraint_set: The set of atoms to restrain.
+ max_attempts: The maximum number of minimization attempts per iteration.
+ checks: Whether to perform cleaning checks.
+ exclude_residues: An optional list of zero-indexed residues to exclude from
+ restraints.
+
+ Returns:
+ out: A dictionary of output values.
+ """
+
+ # `protein.to_pdb` will strip any poorly-defined residues so we need to
+ # perform this check before `clean_protein`.
+ _check_residues_are_well_defined(prot)
+ pdb_string = clean_protein(prot, checks=checks)
+
+ exclude_residues = exclude_residues or []
+ exclude_residues1 = set(exclude_residues)
+ violations = np.inf
+ iteration = 0
+
+ while violations > 0 and iteration < max_outer_iterations:
+ ret = _run_one_iteration(
+ pdb_string=pdb_string,
+ exclude_residues=exclude_residues1,
+ max_iterations=max_iterations,
+ tolerance=tolerance,
+ stiffness=stiffness,
+ restraint_set=restraint_set,
+ max_attempts=max_attempts)
+ prot = protein.from_pdb_string(ret.get("min_pdb"))
+ if place_hydrogens_every_iteration:
+ pdb_string = clean_protein(prot, checks=True)
+ else:
+ pdb_string = ret.get("min_pdb")
+ ret.update(get_violation_metrics(prot))
+ ret.update({
+ "num_exclusions": len(exclude_residues1),
+ "iteration": iteration,
+ })
+ violations = ret.get("violations_per_residue")
+ print("Violation of structure after relaxation: ", violations)
+ exclude_residues1 = exclude_residues1.union(ret.get("residue_violations"))
+
+ logging.info("Iteration completed: Einit %.2f Efinal %.2f Time %.2f s "
+ "num residue violations %d num residue exclusions %d ",
+ ret.get("einit"), ret.get("efinal"), ret.get("opt_time"),
+ ret.get("num_residue_violations"), ret.get("num_exclusions"))
+ iteration += 1
+ return ret
+
+
+def get_initial_energies(pdb_strs: Sequence[str],
+ stiffness: float = 0.0,
+ restraint_set: str = "non_hydrogen",
+ exclude_residues: Optional[Sequence[int]] = None):
+ """Returns initial potential energies for a sequence of PDBs.
+
+ Assumes the input PDBs are ready for minimization, and all have the same
+ topology.
+    This saves time by skipping pdbfixer and rebuilding the OpenMM system.
+
+ Args:
+ pdb_strs: List of PDB strings.
+ stiffness: kcal/mol A**2, spring constant of heavy atom restraining
+ potential.
+ restraint_set: Which atom types to restrain.
+ exclude_residues: An optional list of zero-indexed residues to exclude from
+ restraints.
+
+ Returns:
+ A list of initial energies in the same order as pdb_strs.
+ """
+ exclude_residues = exclude_residues or []
+
+ openmm_pdbs = [openmm_app.PDBFile(internal.pdbstructure.PdbStructure(io.StringIO(p)))
+ for p in pdb_strs]
+ force_field = openmm_app.ForceField("amber99sb.xml")
+ system = force_field.createSystem(openmm_pdbs[0].topology,
+ constraints=openmm_app.HBonds)
+ stiffness = stiffness * ENERGY / (LENGTH ** 2)
+ if stiffness > 0 * ENERGY / (LENGTH ** 2):
+ _add_restraints(system, openmm_pdbs[0], stiffness, restraint_set,
+ exclude_residues)
+ simulation = openmm_app.Simulation(openmm_pdbs[0].topology,
+ system,
+ openmm.LangevinIntegrator(0, 0.01, 0.0),
+ openmm.Platform.getPlatformByName("CPU"))
+ energies = []
+ for pdb in openmm_pdbs:
+ try:
+ simulation.context.setPositions(pdb.positions)
+ state = simulation.context.getState(getEnergy=True)
+ energies.append(state.getPotentialEnergy().value_in_unit(ENERGY))
+ except Exception as e: # pylint: disable=broad-except
+ logging.error("Error getting initial energy, returning large value %s", e)
+ energies.append(unit.Quantity(1e20, ENERGY))
+ return energies
diff --git a/MindSPONGE/applications/research/FAAST/nmr_relax/relax/cleanup.py b/MindSPONGE/applications/research/FAAST/nmr_relax/relax/cleanup.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c62bad9412c555e77beaf801ead124ef0891cad
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/nmr_relax/relax/cleanup.py
@@ -0,0 +1,122 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"clean_up"
+import io
+import pdbfixer
+from simtk.openmm import app
+from simtk.openmm.app import element
+
+
+def fix_pdb(pdbfile, alterations_info):
+ """Apply pdbfixer to the contents of a PDB file; return a PDB string result.
+
+ 1) Replaces nonstandard residues.
+ 2) Removes heterogens (non protein residues) including water.
+ 3) Adds missing residues and missing atoms within existing residues.
+ 4) Adds hydrogens assuming pH=7.0.
+ 5) KeepIds is currently true, so the fixer must keep the existing chain and
+ residue identifiers. This will fail for some files in wider PDB that have
+ invalid IDs.
+
+ Args:
+ pdbfile: Input PDB file handle.
+ alterations_info: A dict that will store details of changes made.
+
+ Returns:
+ A PDB string representing the fixed structure.
+ """
+ fixer = pdbfixer.PDBFixer(pdbfile=pdbfile)
+ fixer.findNonstandardResidues()
+ alterations_info['nonstandard_residues'] = fixer.nonstandardResidues
+ fixer.replaceNonstandardResidues()
+ _remove_heterogens(fixer, alterations_info, keep_water=False)
+ fixer.findMissingResidues()
+ alterations_info['missing_residues'] = fixer.missingResidues
+ fixer.findMissingAtoms()
+ alterations_info['missing_heavy_atoms'] = fixer.missingAtoms
+ alterations_info['missing_terminals'] = fixer.missingTerminals
+ fixer.addMissingAtoms(seed=0)
+ fixer.addMissingHydrogens()
+ out_handle = io.StringIO()
+ app.PDBFile.writeFile(fixer.topology, fixer.positions, out_handle,
+ keepIds=True)
+ return out_handle.getvalue()
+
+
+def clean_structure(pdb_structure, alterations_info):
+ """Applies additional fixes to an OpenMM structure, to handle edge cases.
+
+ Args:
+ pdb_structure: An OpenMM structure to modify and fix.
+ alterations_info: A dict that will store details of changes made.
+ """
+ _replace_met_se(pdb_structure, alterations_info)
+ _remove_chains_of_length_one(pdb_structure, alterations_info)
+
+
+def _remove_heterogens(fixer, alterations_info, keep_water):
+ """Removes the residues that Pdbfixer considers to be heterogens.
+
+ Args:
+ fixer: A Pdbfixer instance.
+ alterations_info: A dict that will store details of changes made.
+ keep_water: If True, water (HOH) is not considered to be a heterogen.
+ """
+ initial_resnames = set()
+ for chain in fixer.topology.chains():
+ for residue in chain.residues():
+ initial_resnames.add(residue.name)
+ fixer.removeHeterogens(keepWater=keep_water)
+ final_resnames = set()
+ for chain in fixer.topology.chains():
+ for residue in chain.residues():
+ final_resnames.add(residue.name)
+ alterations_info['removed_heterogens'] = (
+ initial_resnames.difference(final_resnames))
+
+
+def _replace_met_se(pdb_structure, alterations_info):
+ """Replace the Se in any MET residues that were not marked as modified."""
+ modified_met_residues = []
+ for res in pdb_structure.iter_residues():
+ name = res.get_name_with_spaces().strip()
+ if name == 'MET':
+ s_atom = res.get_atom('SD')
+ if s_atom.element_symbol == 'Se':
+ s_atom.element_symbol = 'S'
+ s_atom.element = element.get_by_symbol('S')
+ modified_met_residues.append(s_atom.residue_number)
+ alterations_info['Se_in_MET'] = modified_met_residues
+
+
+def _remove_chains_of_length_one(pdb_structure, alterations_info):
+ """Removes chains that correspond to a single amino acid.
+
+ A single amino acid in a chain is both N and C terminus. There is no force
+ template for this case.
+
+ Args:
+ pdb_structure: An OpenMM pdb_structure to modify and fix.
+ alterations_info: A dict that will store details of changes made.
+ """
+ removed_chains = {}
+ for model in pdb_structure.iter_models():
+ valid_chains = [c for c in model.iter_chains() if len(c) > 1]
+ invalid_chain_ids = [c.chain_id for c in model.iter_chains() if len(c) <= 1]
+ model.chains = valid_chains
+ for chain_id in invalid_chain_ids:
+ model.chains_by_id.pop(chain_id)
+ removed_chains[model.number] = invalid_chain_ids
+ alterations_info['removed_chains'] = removed_chains
diff --git a/MindSPONGE/applications/research/FAAST/nmr_relax/relax/relax.py b/MindSPONGE/applications/research/FAAST/nmr_relax/relax/relax.py
new file mode 100644
index 0000000000000000000000000000000000000000..573042e0275b119947f5ce354a0b8e1813355e30
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/nmr_relax/relax/relax.py
@@ -0,0 +1,82 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Amber relaxation."""
+from typing import Any, Dict, Sequence, Tuple
+
+import numpy as np
+
+from mindsponge.common import protein
+from nmr_relax.relax import amber_minimize
+from nmr_relax.relax import utils
+
+
+class AmberRelaxation(object):
+ """Amber relaxation."""
+
+ def __init__(self,
+ *,
+ max_iterations: int,
+ tolerance: float,
+ stiffness: float,
+ exclude_residues: Sequence[int],
+ max_outer_iterations: int):
+ """Initialize Amber Relaxer.
+
+ Args:
+ max_iterations: Maximum number of L-BFGS iterations. 0 means no max.
+ tolerance: kcal/mol, the energy tolerance of L-BFGS.
+ stiffness: kcal/mol A**2, spring constant of heavy atom restraining
+ potential.
+ exclude_residues: Residues to exclude from per-atom restraining.
+ Zero-indexed.
+ max_outer_iterations: Maximum number of violation-informed relax
+ iterations. A value of 1 will run the non-iterative procedure used in
+ CASP14. Use 20 so that >95% of the bad cases are relaxed. Relax finishes
+ as soon as there are no violations, hence in most cases this causes no
+ slowdown. In the worst case we do 20 outer iterations.
+ """
+
+ self._max_iterations = max_iterations
+ self._tolerance = tolerance
+ self._stiffness = stiffness
+ self._exclude_residues = exclude_residues
+ self._max_outer_iterations = max_outer_iterations
+
+ def process(self, *,
+ prot: protein.Protein) -> Tuple[str, Dict[str, Any], np.ndarray]:
+ """Runs Amber relax on a prediction, adds hydrogens, returns PDB string."""
+ out = amber_minimize.run_pipeline(
+ prot=prot, max_iterations=self._max_iterations,
+ tolerance=self._tolerance, stiffness=self._stiffness,
+ exclude_residues=self._exclude_residues,
+ max_outer_iterations=self._max_outer_iterations)
+ min_pos = out.get('pos')
+ start_pos = out.get('posinit')
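+        # RMSD between the initial and minimized coordinates, averaged over atoms.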
+ rmsd = np.sqrt(np.sum((start_pos - min_pos) ** 2) / start_pos.shape[0])
+ debug_data = {
+ 'initial_energy': out.get('einit'),
+ 'final_energy': out.get('efinal'),
+ 'attempts': out.get('min_attempts'),
+ 'rmsd': rmsd
+ }
+ pdb_str = amber_minimize.clean_protein(prot)
+ min_pdb = utils.overwrite_pdb_coordinates(pdb_str, min_pos)
+ min_pdb = utils.overwrite_b_factors(min_pdb, prot.b_factors)
+ utils.assert_equal_nonterminal_atom_types(
+ protein.from_pdb_string(min_pdb).atom_mask,
+ prot.atom_mask)
+ violations = out.get('structural_violations').get(
+ 'total_per_residue_violations_mask')
+ return min_pdb, debug_data, violations
diff --git a/MindSPONGE/applications/research/FAAST/nmr_relax/relax/utils.py b/MindSPONGE/applications/research/FAAST/nmr_relax/relax/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2ddabe684d391d270eec6f4e6a4dada69403f8a
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/nmr_relax/relax/utils.py
@@ -0,0 +1,82 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Utils for minimization."""
+import io
+
+from Bio import PDB
+import numpy as np
+from simtk.openmm import app as openmm_app
+from simtk.openmm.app import internal
+from mindsponge.common import residue_constants
+
+
+def overwrite_pdb_coordinates(pdb_str: str, pos) -> str:
+ '''overwrite_pdb_coordinates'''
+ pdb_file = io.StringIO(pdb_str)
+ structure = internal.pdbstructure.PdbStructure(pdb_file)
+ topology = openmm_app.PDBFile(structure).getTopology()
+ with io.StringIO() as f:
+ openmm_app.PDBFile.writeFile(topology, pos, f)
+ return f.getvalue()
+
+
+def overwrite_b_factors(pdb_str: str, bfactors: np.ndarray) -> str:
+ """Overwrites the B-factors in pdb_str with contents of bfactors array.
+
+ Args:
+ pdb_str: An input PDB string.
+ bfactors: A numpy array with shape [1, n_residues, 37]. We assume that the
+ B-factors are per residue; i.e. that the nonzero entries are identical in
+ [0, i, :].
+
+ Returns:
+ A new PDB string with the B-factors replaced.
+ """
+ if bfactors.shape[-1] != residue_constants.atom_type_num:
+ raise ValueError(
+ f'Invalid final dimension size for bfactors: {bfactors.shape[-1]}.')
+
+ parser = PDB.PDBParser()
+ handle = io.StringIO(pdb_str)
+ structure = parser.get_structure('', handle)
+
+ curr_resid = ('', '', '')
+ idx = -1
+ for atom in structure.get_atoms():
+ atom_resid = atom.parent.get_id()
+ if atom_resid != curr_resid:
+ idx += 1
+ if idx >= bfactors.shape[0]:
+                raise ValueError('Index into bfactors exceeds number of residues. '
+                                 f'B-factors shape: {bfactors.shape}, idx: {idx}.')
+ curr_resid = atom_resid
+ atom.bfactor = bfactors[idx, residue_constants.atom_order['CA']]
+
+ new_pdb = io.StringIO()
+ pdb_io = PDB.PDBIO()
+ pdb_io.set_structure(structure)
+ pdb_io.save(new_pdb)
+ return new_pdb.getvalue()
+
+
+def assert_equal_nonterminal_atom_types(
+ atom_mask: np.ndarray, ref_atom_mask: np.ndarray):
+ """Checks that pre- and post-minimized proteins have same atom set."""
+ # Ignore any terminal OXT atoms which may have been added by minimization.
+ oxt = residue_constants.atom_order['OXT']
+    no_oxt_mask = np.ones(shape=atom_mask.shape, dtype=bool)
+ no_oxt_mask[..., oxt] = False
+ np.testing.assert_almost_equal(ref_atom_mask[no_oxt_mask],
+ atom_mask[no_oxt_mask])
diff --git a/MindSPONGE/applications/research/FAAST/run_rasp.py b/MindSPONGE/applications/research/FAAST/run_rasp.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f7675be03c409ee62b7f6591fd4c86a973550c2
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/run_rasp.py
@@ -0,0 +1,220 @@
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""eval script"""
+import argparse
+import ast
+import os
+import stat
+import time
+import pynvml
+import numpy as np
+from data import Feature, RawFeatureGenerator, get_crop_size, get_raw_feature
+from mindspore import Tensor, nn, load_checkpoint
+import mindspore.common.dtype as mstype
+import mindspore.context as context
+from mindsponge.cell.amp import amp_convert
+from mindsponge.common import residue_constants
+from mindsponge.common.config_load import load_config
+from mindsponge.common.protein import to_pdb, from_prediction
+from model import MegaFold, compute_confidence
+from search import mk_hhsearch_db
+
+parser = argparse.ArgumentParser(description='Inputs for eval.py')
+parser.add_argument('--data_config', default="./config/data.yaml", help='data process config')
+parser.add_argument('--use_custom', type=ast.literal_eval, default=False, help='whether use custom')
+parser.add_argument('--model_config', default="./config/model.yaml", help='model config')
+parser.add_argument('--input_path', help='processed raw feature path')
+parser.add_argument('--restraints_path', type=str, help='Location of training restraints file.')
+parser.add_argument('--use_pkl', type=ast.literal_eval, default=False,
+ help="use pkl as input or fasta file as input, in default use fasta")
+parser.add_argument('--use_template', type=ast.literal_eval, default=False,
+ help="use_template or not, in default use template")
+parser.add_argument('--checkpoint_file', help='checkpoint path')
+parser.add_argument('--device_id', default=0, type=int, help='DEVICE_ID')
+parser.add_argument('--a3m_path', type=str, help='a3m_path')
+parser.add_argument('--template_path', type=str, help='template_path')
+parser.add_argument('--run_platform', default='Ascend', type=str, help='which platform to use, Ascend or GPU')
+arguments = parser.parse_args()
+
+
+def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
+ """Create pseudo beta features."""
+ is_gly = np.equal(aatype, residue_constants.restype_order['G'])
+ ca_idx = residue_constants.atom_order['CA']
+ cb_idx = residue_constants.atom_order['CB']
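+    # Glycine has no CB atom, so its CA position is used as the pseudo beta carbon.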
+ pseudo_beta = np.where(np.tile(is_gly[..., None].astype("int32"), \
+ [1, ] * len(is_gly.shape) + [3, ]).astype("bool"), \
+ all_atom_positions[..., ca_idx, :], \
+ all_atom_positions[..., cb_idx, :])
+ if all_atom_masks is not None:
+ pseudo_beta_mask = np.where(is_gly, all_atom_masks[..., ca_idx], all_atom_masks[..., cb_idx])
+ pseudo_beta_mask = pseudo_beta_mask.astype(np.float32)
+ return pseudo_beta, pseudo_beta_mask
+ return pseudo_beta
+
+
+def contact_evaluation(final_atom_positions, aatype, contact_mask_input):
+ '''contact_evaluation'''
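+    # Fraction of the input restraints that are satisfied in the prediction,
+    # i.e. whose pseudo-beta distance is within the 10 angstrom contact cutoff.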
+ if contact_mask_input.sum() < 1:
+ return 1.0
+ contact_mask_input = contact_mask_input.astype(np.float32)
+ pseudo_beta_pred = pseudo_beta_fn(aatype, final_atom_positions, None) # CA as CB for glycine
+ cb_distance_pred = np.sqrt((np.square(pseudo_beta_pred[None] - pseudo_beta_pred[:, None])).sum(-1) + 1e-8)
+ has_contact_pred = (cb_distance_pred <= 10).astype(np.float32) # 8.0 or 10.0
+
+ contact_pred_rate_input = ((has_contact_pred == contact_mask_input) * \
+ contact_mask_input).sum() / (contact_mask_input.sum() + 1e-8)
+
+ return round(contact_pred_rate_input, 4)
+
+
+def make_contact_info(ori_seq_len, ur_path):
+ '''make_contact_info'''
+ num_residues = ori_seq_len
+ contact_info_mask = np.zeros((num_residues, num_residues))
+ if not ur_path:
+ return contact_info_mask
+
+ with open(ur_path, encoding='utf-8') as f:
+ all_urs = f.readlines()
+ all_urs = [i.split('!')[0].rstrip() for i in all_urs]
+ useful_urs = []
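+    # Each restraint line encodes a residue pair; the first and last whitespace-separated
+    # fields are taken as the residue indices.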
+ for urls in all_urs:
+ i = urls.split(" ")
+ temp = []
+ temp.append(int(i[0]))
+ temp.append(int(i[-1]))
+ useful_urs.append(temp)
+
+ for i in useful_urs:
+ contact_info_mask[i[0], i[1]] = 1
+ contact_info_mask = (contact_info_mask + contact_info_mask.T) > 0
+ contact_info_mask = contact_info_mask.astype(np.float32)
+
+ return contact_info_mask
+
+
+def fold_infer(args):
+ '''rasp inference'''
+ data_cfg = load_config(args.data_config)
+ model_cfg = load_config(args.model_config)
+ data_cfg.eval.crop_size = get_crop_size(args.input_path, args.use_pkl)
+ model_cfg.seq_length = data_cfg.eval.crop_size
+ if args.run_platform == "GPU":
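+        # Query total GPU memory; devices with 25 GB or less switch to the
+        # alternative slicing configuration (slice_new).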
+ pynvml.nvmlInit()
+ pynvml.nvmlSystemGetDriverVersion()
+ handle = pynvml.nvmlDeviceGetHandleByIndex(0)
+ info = pynvml.nvmlDeviceGetMemoryInfo(handle)
+ total = info.total / 1024 / 1024 / 1024
+ if total <= 25:
+ model_cfg.slice = model_cfg.slice_new
+ slice_key = "seq_" + str(model_cfg.seq_length)
+ slice_val = vars(model_cfg.slice)[slice_key]
+ model_cfg.slice = slice_val
+
+ megafold = MegaFold(model_cfg, mixed_precision=args.mixed_precision)
+
+ if args.mixed_precision:
+ fp32_white_list = (nn.Softmax, nn.LayerNorm)
+ amp_convert(megafold, fp32_white_list)
+ else:
+ megafold.to_float(mstype.float32)
+
+ temp_names = os.listdir(args.input_path)
+ prot_names = []
+
+ if args.use_custom:
+ mk_hhsearch_db(args.template_path)
+ if not args.use_pkl:
+ os.makedirs(args.a3m_path, exist_ok=True)
+ os.makedirs(args.template_path, exist_ok=True)
+ feature_generator = RawFeatureGenerator(data_cfg.database_search, args.a3m_path, args.template_path,
+ args.use_custom, args.use_template)
+ for key in temp_names:
+ if "fas" in key:
+ prot_names.append(key)
+ else:
+ feature_generator = None
+ for key in temp_names:
+ if "pkl" in key:
+ prot_names.append(key)
+
+ load_checkpoint(args.checkpoint_file, megafold)
+
+ for prot_file in prot_names:
+ prot_name = prot_file.split('.')[0]
+ raw_feature = get_raw_feature(os.path.join(args.input_path, prot_file), feature_generator, args.use_pkl,
+ prot_name)
+ ori_res_length = raw_feature['msa'].shape[1]
+ ur_path = f"{args.restraints_path}/{prot_name}.txt"
+ contact_info_mask_new = make_contact_info(model_cfg.seq_length, ur_path)
+ contact_info_mask_new = Tensor(contact_info_mask_new, mstype.float32)
+ processed_feature = Feature(data_cfg, raw_feature)
+ feat, prev_pos, prev_msa_first_row, prev_pair = processed_feature.pipeline(data_cfg, \
+ mixed_precision=args.mixed_precision)
+
+ prev_pos = Tensor(prev_pos)
+ prev_msa_first_row = Tensor(prev_msa_first_row)
+ prev_pair = Tensor(prev_pair)
+ t1 = time.time()
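+ # four recycling iterations: prev_pos, prev_msa_first_row and prev_pair from each pass are fed back into the next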
+ for i in range(4):
+ feat_i = [Tensor(x[i]) for x in feat]
+ result = megafold(*feat_i,
+ prev_pos,
+ prev_msa_first_row,
+ prev_pair,
+ contact_info_mask_new)
+ prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits = result
+ eval_res = contact_evaluation(prev_pos.asnumpy()[:ori_res_length],
+ feat[4][0][:ori_res_length],
+ contact_info_mask_new[:ori_res_length, :ori_res_length].asnumpy())
+ t2 = time.time()
+ final_atom_positions = prev_pos.asnumpy()[:ori_res_length]
+ final_atom_mask = feat[16][0][:ori_res_length]
+ predicted_lddt_logits = predicted_lddt_logits.asnumpy()[:ori_res_length]
+ confidence, plddt = compute_confidence(predicted_lddt_logits, return_lddt=True)
+ print("confidence of predicted structrue :", confidence, " , time :", t2 - t1, ", restraint recall :", eval_res)
+ b_factors = plddt[:, None] * final_atom_mask
+
+ unrelaxed_protein = from_prediction(final_atom_positions,
+ final_atom_mask,
+ feat[4][0][:ori_res_length],
+ feat[17][0][:ori_res_length],
+ b_factors)
+ pdb_file = to_pdb(unrelaxed_protein)
+ os.makedirs("./result/", exist_ok=True)
+ unrelaxed_pdb_file_path = os.path.join("./result/", f'{prot_name}.pdb')
+ os_flags = os.O_RDWR | os.O_CREAT
+ os_modes = stat.S_IRWXU
+ with os.fdopen(os.open(unrelaxed_pdb_file_path, os_flags, os_modes), 'w') as fout:
+ fout.write(pdb_file)
+
+
+if __name__ == "__main__":
+ if arguments.run_platform == 'Ascend':
+ context.set_context(mode=context.GRAPH_MODE,
+ memory_optimize_level="O1",
+ device_target="Ascend",
+ max_call_depth=6000,
+ device_id=arguments.device_id)
+ arguments.mixed_precision = 1
+ elif arguments.run_platform == 'GPU':
+ context.set_context(mode=context.GRAPH_MODE,
+ memory_optimize_level="O1",
+ device_target="GPU",
+ max_call_depth=6000,
+ device_id=arguments.device_id,)
+ arguments.mixed_precision = 0
+ fold_infer(arguments)
diff --git a/MindSPONGE/applications/research/FAAST/run_relax.py b/MindSPONGE/applications/research/FAAST/run_relax.py
new file mode 100644
index 0000000000000000000000000000000000000000..273145a84faecbe52bdc5707b9f5fed4b4eb92f2
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/run_relax.py
@@ -0,0 +1,87 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"run_relax"
+import os
+import stat
+import numpy as np
+
+from mindsponge.common import protein, residue_constants
+from nmr_relax.relax import relax
+
+RELAX_MAX_ITERATIONS = 0
+RELAX_ENERGY_TOLERANCE = 2.39
+RELAX_STIFFNESS = 10.0
+RELAX_EXCLUDE_RESIDUES = []
+RELAX_MAX_OUTER_ITERATIONS = 1
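+ # max_iterations = 0 lets the minimizer run until the energy tolerance is reached (assumed AmberRelaxation semantics)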
+
+
+def make_atom14_masks(feature):
+ """Construct denser atom positions (14 dimensions instead of 37)."""
+ # create the corresponding mask
+ restype_atom37_mask = np.zeros([21, 37], np.float32)
+ for restype, restype_letter in enumerate(residue_constants.restypes):
+ restype_name = residue_constants.restype_1to3[restype_letter]
+ atom_names = residue_constants.residue_atoms[restype_name]
+ for atom_name in atom_names:
+ atom_type = residue_constants.atom_order[atom_name]
+ restype_atom37_mask[restype, atom_type] = 1
+
+ residx_atom37_mask = restype_atom37_mask[feature.get('aatype')]
+
+ return residx_atom37_mask
+
+
+def get_amber_input(input_file_path):
+ '''get_amber_input'''
+ with open(input_file_path, 'r') as f:
+ prot_pdb = protein.from_pdb_string(f.read())
+ aatype = prot_pdb.aatype
+ b_factors = prot_pdb.b_factors
+ seq_len = len(aatype)
+ atom_positions = prot_pdb.atom_positions.astype(np.float32)
+ atom37_mask = prot_pdb.atom_mask.astype(np.float32)
+ residue_index = np.array(range(seq_len), dtype=np.int32)
+ features = {'aatype': aatype,
+ 'all_atom_positions': atom_positions,
+ 'all_atom_mask': atom37_mask}
+ atom_mask = make_atom14_masks(features)
+ result = (aatype, atom_positions, atom_mask, residue_index, b_factors)
+
+ return result
+
+
+def run_relax(input_file_path, output_file_path):
+ '''run_relax'''
+
+ amber_relaxer = relax.AmberRelaxation(
+ max_iterations=RELAX_MAX_ITERATIONS,
+ tolerance=RELAX_ENERGY_TOLERANCE,
+ stiffness=RELAX_STIFFNESS,
+ exclude_residues=RELAX_EXCLUDE_RESIDUES,
+ max_outer_iterations=RELAX_MAX_OUTER_ITERATIONS)
+
+ result = get_amber_input(input_file_path)
+ aatype, atom_positions, atom_mask, residue_index, b_factors = result
+ data = [aatype, residue_index, atom_positions, atom_mask, b_factors]
+ unrelaxed_protein = protein.from_prediction_new(data)
+
+ # Relax the prediction.
+ relaxed_pdb_str, _, _ = amber_relaxer.process(prot=unrelaxed_protein)
+
+ # Save the relaxed PDB.
+ os_flags = os.O_RDWR | os.O_CREAT
+ os_modes = stat.S_IRWXU
+ with os.fdopen(os.open(output_file_path, os_flags, os_modes), "w") as fout:
+ fout.write(relaxed_pdb_str)
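+
+
+ # minimal usage sketch (paths are hypothetical):
+ # run_relax("./result/prot.pdb", "./result/prot_relaxed.pdb")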
diff --git a/MindSPONGE/applications/research/FAAST/search.py b/MindSPONGE/applications/research/FAAST/search.py
new file mode 100644
index 0000000000000000000000000000000000000000..8adcfd3d6ce5ccd6f9b7a738422b34add5376d83
--- /dev/null
+++ b/MindSPONGE/applications/research/FAAST/search.py
@@ -0,0 +1,363 @@
+# Copyright 2023 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"search"
+import os
+import time
+import random
+import tarfile
+import logging
+import stat
+from io import StringIO
+from pathlib import Path
+import shutil
+import requests
+from Bio.PDB import MMCIFParser, PDBParser, MMCIF2Dict
+from tqdm import tqdm
+from mindsponge.common import residue_constants
+
+logger = logging.getLogger(__name__)
+
+TQDM_BAR_FORMAT = '{l_bar}{bar}| {n_fmt}/{total_fmt} [elapsed: {elapsed} remaining: {remaining}]'
+DEFAULT_API_SERVER = "https://api.colabfold.com"
+
+
+def validate_and_fix_mmcif(cif_file: Path):
+ """validate presence of _entity_poly_seq in cif file and add revision_date if missing"""
+ # check that required poly_seq and revision_date fields are present
+ cif_dict = MMCIF2Dict.MMCIF2Dict(cif_file)
+ required = [
+ "_chem_comp.id",
+ "_chem_comp.type",
+ "_struct_asym.id",
+ "_struct_asym.entity_id",
+ "_entity_poly_seq.mon_id",
+ ]
+ for r in required:
+ if r not in cif_dict:
+ raise ValueError(f"mmCIF file {cif_file} is missing required field {r}.")
+ if "_pdbx_audit_revision_history.revision_date" not in cif_dict:
+ logger.info(
+ f"Adding missing field revision_date to {cif_file}. Backing up original file to {cif_file}.bak."
+ )
+ shutil.copy2(cif_file, str(cif_file) + ".bak")
+ os_flags = os.O_RDWR | os.O_CREAT
+ os_modes = stat.S_IRWXU
+ with os.fdopen(os.open(cif_file, os_flags, os_modes), 'a') as f:
+ f.write(CIF_REVISION_DATE)
+
+
+def convert_pdb_to_mmcif(pdb_file: Path):
+ """convert existing pdb files into mmcif with the required poly_seq and revision_date"""
+ i = pdb_file.stem
+ cif_file = pdb_file.parent.joinpath(f"{i}.cif")
+ if cif_file.is_file():
+ return
+ parser = PDBParser(QUIET=True)
+ structure = parser.get_structure(i, pdb_file)
+ cif_io = CFMMCIFIO()
+ cif_io.set_structure(structure)
+ cif_io.save(str(cif_file), ReplaceOrRemoveHetatmSelect())
+
+
+def mk_hhsearch_db(template_dir: str):
+ '''Build a minimal pdb70-format hhsearch database from the cif/pdb files in template_dir.'''
+ template_path = Path(template_dir)
+
+ cif_files = template_path.glob("*.cif")
+ for cif_file in cif_files:
+ validate_and_fix_mmcif(cif_file)
+
+ pdb_files = template_path.glob("*.pdb")
+ for pdb_file in pdb_files:
+ convert_pdb_to_mmcif(pdb_file)
+
+ pdb70_db_files = template_path.glob("pdb70*")
+ for f in pdb70_db_files:
+ os.remove(f)
+
+ os_flags = os.O_RDWR | os.O_CREAT
+ os_modes = stat.S_IRWXU
+
+ with os.fdopen(os.open(template_path.joinpath("pdb70_a3m.ffdata"), os_flags, os_modes), 'w') \
+ as a3m, os.fdopen(os.open(template_path.joinpath("pdb70_cs219.ffindex"), \
+ os_flags, os_modes), 'w') as cs219_index, os.fdopen( \
+ os.open(template_path.joinpath("pdb70_a3m.ffindex"), \
+ os_flags, os_modes), 'w') as a3m_index, os.fdopen(os.open( \
+ template_path.joinpath("pdb70_cs219.ffdata"), os_flags, os_modes), 'w') as cs219:
+ n = 1000000
+ index_offset = 0
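+ # each ffindex record is keyed by an integer id starting at 1000000; index_offset tracks the offset of the record in the ffdata file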
+ cif_files = template_path.glob("*.cif")
+ for cif_file in cif_files:
+ with open(cif_file) as f:
+ cif_string = f.read()
+ cif_fh = StringIO(cif_string)
+ parser = MMCIFParser(QUIET=True)
+ structure = parser.get_structure("none", cif_fh)
+ models = list(structure.get_models())
+ model = models[0]
+ for chain in model:
+ amino_acid_res = []
+ for res in chain:
+ if res.id[2] != " ":
+ continue
+ amino_acid_res.append(
+ residue_constants.restype_3to1.get(res.resname, "X")
+ )
+
+ protein_str = "".join(amino_acid_res)
+ a3m_str = f">{cif_file.stem}_{chain.id}\n{protein_str}\n\0"
+ a3m_str_len = len(a3m_str)
+ a3m_index.write(f"{n}\t{index_offset}\t{a3m_str_len}\n")
+ cs219_index.write(f"{n}\t{index_offset}\t{len(protein_str)}\n")
+ index_offset += a3m_str_len
+ a3m.write(a3m_str)
+ cs219.write("\n\0")
+ n += 1
+
+
+def run_mmseqs2(x, a3m_result_path, template_path, use_env=True, use_filters=True,
+ use_templates=True, filters=None, use_pairing=False,
+ host_url="https://api.colabfold.com"):
+ '''Query the MMseqs2 web API for MSAs and, optionally, structural templates.'''
+ submission_endpoint = "ticket/pair" if use_pairing else "ticket/msa"
+
+ def submit(seqs, mode, ns=101):
+ n, query = ns, ""
+ for seq in seqs:
+ query += f">{n}\n{seq}\n"
+ n += 1
+
+ error_count = 0
+ while True:
+ try:
+ # https://requests.readthedocs.io/en/latest/user/advanced/#advanced
+ # "good practice to set connect timeouts to slightly larger than a multiple of 3"
+ res = requests.post(f'{host_url}/{submission_endpoint}', data={'q': query, 'mode': mode}, timeout=6.02,
+ verify=False)
+ except requests.exceptions.Timeout:
+ logger.warning("Timeout while submitting to MSA server. Retrying...")
+ continue
+ except Exception as e:
+ error_count += 1
+ logger.warning(f"Error while fetching result from MSA server. Retrying... ({error_count}/5)")
+ logger.warning(f"Error: {e}")
+ time.sleep(5)
+ if error_count > 5:
+ raise
+ continue
+ break
+
+ try:
+ out = res.json()
+ except ValueError:
+ logger.error(f"Server didn't reply with json: {res.text}")
+ out = {"status": "ERROR"}
+ return out
+
+ def status(ids):
+ error_count = 0
+ while True:
+ try:
+ res = requests.get(f'{host_url}/ticket/{ids}', timeout=6.02, verify=False)
+ except requests.exceptions.Timeout:
+ logger.warning("Timeout while fetching status from MSA server. Retrying...")
+ continue
+ except Exception as e:
+ error_count += 1
+ logger.warning(f"Error while fetching result from MSA server. Retrying... ({error_count}/5)")
+ logger.warning(f"Error: {e}")
+ time.sleep(5)
+ if error_count > 5:
+ raise
+ continue
+ break
+ try:
+ out = res.json()
+ except ValueError:
+ logger.error(f"Server didn't reply with json: {res.text}")
+ out = {"status": "ERROR"}
+ return out
+
+ def download(ids, path):
+ error_count = 0
+ while True:
+ try:
+ res = requests.get(f'{host_url}/result/download/{ids}', timeout=6.02, verify=False)
+ except requests.exceptions.Timeout:
+ logger.warning("Timeout while fetching result from MSA server. Retrying...")
+ continue
+ except Exception as e:
+ error_count += 1
+ logger.warning(f"Error while fetching result from MSA server. Retrying... ({error_count}/5)")
+ logger.warning(f"Error: {e}")
+ time.sleep(5)
+ if error_count > 5:
+ raise
+ continue
+ break
+
+ os_flags = os.O_RDWR | os.O_CREAT
+ os_modes = stat.S_IRWXU
+ with os.fdopen(os.open(path, os_flags, os_modes), 'wb') as out:
+ out.write(res.content)
+
+ # process input x
+ seqs = [x] if isinstance(x, str) else x
+
+ # compatibility to old option
+ if filters is not None:
+ use_filters = filters
+
+ # setup mode
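+ # ("env" also searches the server's environmental sequence databases; "nofilters" disables MSA diversity filtering)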
+ if use_filters:
+ mode = "env" if use_env else "all"
+ else:
+ mode = "env-nofilters" if use_env else "nofilters"
+
+ if use_pairing:
+ mode = ""
+ use_templates = False
+ use_env = False
+
+ # define path
+ path = a3m_result_path
+ if not os.path.isdir(path):
+ os.mkdir(path)
+
+ # call mmseqs2 api
+ tar_gz_file = f'{path}/out.tar.gz'
+ ns, redo = 101, True
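+ # query sequences are numbered from ns=101 in the FASTA submitted to the server (see submit above)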
+
+ # deduplicate and keep track of order
+ seqs_unique = list(dict.fromkeys(seqs))
+ # lets do it!
+
+ if not os.path.isfile(tar_gz_file):
+ time_estimate = 150 * len(seqs_unique)
+ with tqdm(total=time_estimate, bar_format=TQDM_BAR_FORMAT) as pbar:
+ while redo:
+ pbar.set_description("SUBMIT")
+
+ # Resubmit job until it goes through
+ out = submit(seqs_unique, mode, ns)
+ while out.get("status") in ["UNKNOWN", "RATELIMIT"]:
+ sleep_time = 5 + random.randint(0, 5)
+ logger.error(f"Sleeping for {sleep_time}s. Reason: {out.get('status')}")
+ # resubmit
+ time.sleep(sleep_time)
+ out = submit(seqs_unique, mode, ns)
+
+ if out.get("status") == "ERROR":
+ raise Exception(
+ 'MMseqs2 API is giving errors. Please confirm your input is a valid protein sequence. '
+ 'If error persists, please try again an hour later.')
+
+ if out.get("status") == "MAINTENANCE":
+ raise Exception('MMseqs2 API is undergoing maintenance. Please try again in a few minutes.')
+
+ # wait for job to finish
+ ids, times1 = out.get("id"), 0
+ pbar.set_description(out.get("status"))
+ while out.get("status") in ["UNKNOWN", "RUNNING", "PENDING"]:
+ t = 5 + random.randint(0, 5)
+ logger.error(f"Sleeping for {t}s. Reason: {out.get('status')}")
+ time.sleep(t)
+ out = status(ids)
+ pbar.set_description(out.get("status"))
+ if out.get("status") == "RUNNING":
+ times1 += t
+ pbar.update(n=t)
+
+ if out.get("status") == "COMPLETE":
+ if times1 < time_estimate:
+ pbar.update(n=(time_estimate - times1))
+ redo = False
+
+ if out.get("status") == "ERROR":
+ raise Exception(
+ 'MMseqs2 API is giving errors. Please confirm your input is a valid protein '
+ 'sequence. If error persists, please try again an hour later.')
+
+ # Download results
+ download(ids, tar_gz_file)
+
+ # prep list of a3m files
+ if use_pairing:
+ a3m_files = [f"{path}/pair.a3m"]
+ else:
+ a3m_files = [f"{path}/uniref.a3m"]
+ if use_env:
+ a3m_files.append(f"{path}/bfd.mgnify30.metaeuk30.smag30.a3m")
+
+ # extract a3m files
+ if any(not os.path.isfile(a3m_file) for a3m_file in a3m_files):
+ with tarfile.open(tar_gz_file) as tar_gz:
+ tar_gz.extractall(path)
+
+ # templates
+ if use_templates:
+ templates = {}
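+ # pdb70.m8 is BLAST tabular output: query id, template pdb id, sequence identity, ..., e-value; only the first two columns are used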
+ for line in open(f"{path}/pdb70.m8", "r"):
+ p = line.rstrip().split()
+ ms, pdb, _, _ = p[0], p[1], p[2], p[10]
+ ms = int(ms)
+ if ms not in templates:
+ templates[ms] = []
+ templates.get(ms).append(pdb)
+
+ template_paths = {}
+ for k, tmpl in templates.items():
+ tmpl_path = f"{template_path}_{k}"
+ if not os.path.isdir(tmpl_path):
+ os.mkdir(tmpl_path)
+ tmpl_line = ",".join(tmpl[:20])
+ response = None
+ error_count = 0
+ while True:
+ try:
+ response = requests.get(f"{host_url}/template/{tmpl_line}", stream=True, timeout=6.02,
+ verify=False)
+ except requests.exceptions.Timeout:
+ logger.warning("Timeout while submitting to template server. Retrying...")
+ continue
+ except Exception as e:
+ error_count += 1
+ logger.warning(
+ f"Error while fetching result from template server. Retrying... ({error_count}/5)")
+ logger.warning(f"Error: {e}")
+ time.sleep(5)
+ if error_count > 5:
+ raise
+ continue
+ break
+ with tarfile.open(fileobj=response.raw, mode="r|gz") as tar:
+ tar.extractall(path=tmpl_path)
+ os.symlink("pdb70_a3m.ffindex", f"{tmpl_path}/pdb70_cs219.ffindex")
+ os_flags = os.O_RDWR | os.O_CREAT
+ os_modes = stat.S_IRWXU
+ with os.fdopen(os.open(f"{tmpl_path}/pdb70_cs219.ffdata", os_flags, os_modes), 'w') as f:
+ f.write("")
+ template_paths[k] = tmpl_path
+
+ # merge the downloaded template files into template_path and remove the per-query directory
+ os.system(f"cp -r {tmpl_path}/* {template_path} && rm -rf {tmpl_path}")
+
+
+def colabsearch(sequence, a3m_result_path, template_path):
+ run_mmseqs2(sequence, a3m_result_path, template_path, use_filters=True,
+ host_url="https://a3m.mmseqs.com")
+ mk_hhsearch_db(template_path)
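+
+
+ # minimal usage sketch (paths and sequence are hypothetical):
+ # colabsearch("MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ", "./a3m_result", "./template")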
diff --git a/MindSPONGE/applications/research/ProteinMPNN/src/datasets.py b/MindSPONGE/applications/research/ProteinMPNN/src/datasets.py
index 6a6a577d8292590e057341f7a282e503e7670e6d..ce1644e0ec43598757a959642aeb5e71564d665f 100644
--- a/MindSPONGE/applications/research/ProteinMPNN/src/datasets.py
+++ b/MindSPONGE/applications/research/ProteinMPNN/src/datasets.py
@@ -308,7 +308,7 @@ def append_cluster(ix, lengths, define_batch, clusters, batch_128, batch_256, ba
elif lengths[ix] > 1024 and len(batch_) == define_batch[4]:
clusters.append(batch_)
batch_ = []
- output = (clusters, batch_128, batch_256, batch_512, batch_1024, batch_)
+ output = (clusters, batch_128, batch_256, batch_512, batch_1024, batch_)
return output
diff --git a/MindSPONGE/applications/research/ProteinMPNN/src/model.py b/MindSPONGE/applications/research/ProteinMPNN/src/model.py
index 1a0e449007f00c44ee13b64a7d0a3ff15445a1e8..78386b0dc6521049c07c794b9dafdd4b8f5c6f25 100644
--- a/MindSPONGE/applications/research/ProteinMPNN/src/model.py
+++ b/MindSPONGE/applications/research/ProteinMPNN/src/model.py
@@ -21,6 +21,7 @@ from mindspore.common.initializer import initializer, XavierUniform
def gather_edges(edges, neighbor_idx):
+ """gather_edges"""
# Features [B,N,N,C] at Neighbor indices [B,N,K] => Neighbor features [B,N,K,C]
neighbors = ops.broadcast_to(ops.expand_dims(neighbor_idx, -1),
(neighbor_idx.shape[0], neighbor_idx.shape[1], neighbor_idx.shape[2], edges.shape[-1]))
@@ -29,6 +30,7 @@ def gather_edges(edges, neighbor_idx):
def gather_nodes(nodes, neighbor_idx):
+ """gather_nodes"""
# Features [B,N,C] at Neighbor indices [B,N,K] => [B,N,K,C]
# Flatten and expand indices per batch [B,N,K] => [B,NK] => [B,NK,C]
neighbors_flat = neighbor_idx.view((neighbor_idx.shape[0], -1))
@@ -36,11 +38,13 @@ def gather_nodes(nodes, neighbor_idx):
(neighbors_flat.shape[0], neighbors_flat.shape[1], nodes.shape[2]))
# Gather and re-pack
neighbor_features = ops.GatherD()(nodes, 1, neighbors_flat)
- neighbor_features = neighbor_features.view(tuple(list(neighbor_idx.shape)[:3] + [-1]))
+ neighbor_features = neighbor_features.view((neighbor_idx.shape[0], neighbor_idx.shape[1],
+ neighbor_idx.shape[2], -1))
return neighbor_features
def gather_nodes_t(nodes, neighbor_idx):
+ """gather_nodes_t"""
# Features [B,N,C] at Neighbor index [B,K] => Neighbor features[B,K,C]
idx_flat = ops.broadcast_to(ops.expand_dims(neighbor_idx, -1),
(neighbor_idx.shape[0], neighbor_idx.shape[1], nodes.shape[2]))
@@ -242,7 +246,11 @@ class ProteinFeatures(nn.Cell):
rbf_all.append(self._get_rbf(c, cb, e_idx)) # C-cb
rbf_all.append(self._get_rbf(o, cb, e_idx)) # o-cb
rbf_all.append(self._get_rbf(c, o, e_idx)) # C-O
- rbf_all = ops.Concat(axis=-1)(tuple(rbf_all))
+ rbf_all = ops.Concat(axis=-1)((rbf_all[0], rbf_all[1], rbf_all[2], rbf_all[3], rbf_all[4], rbf_all[5],
+ rbf_all[6], rbf_all[7], rbf_all[8], rbf_all[9], rbf_all[10], rbf_all[11],
+ rbf_all[12], rbf_all[13], rbf_all[14], rbf_all[15], rbf_all[16], rbf_all[17],
+ rbf_all[18], rbf_all[19], rbf_all[20], rbf_all[21], rbf_all[22], rbf_all[23],
+ rbf_all[24]))
offset = residue_idx[:, :, None] - residue_idx[:, None, :]
offset = gather_edges(offset[:, :, :, None], e_idx)[:, :, :, 0] # [B, L, K]
diff --git a/MindSPONGE/applications/research/ProteinMPNN/src/utils.py b/MindSPONGE/applications/research/ProteinMPNN/src/utils.py
index 344aada30db936685b69dd4df0391857c23356e1..50dfd98e8dd44065e2354c132a091c01c0dd7130 100644
--- a/MindSPONGE/applications/research/ProteinMPNN/src/utils.py
+++ b/MindSPONGE/applications/research/ProteinMPNN/src/utils.py
@@ -313,7 +313,7 @@ class LossSmoothed(nn.Cell):
s_onehot = ops.Cast()(nn.OneHot(depth=21)(s), ms.float32)
# Label smoothing
- s_onehot = s_onehot + self.weight / float(s_onehot.shape[-1])
+ s_onehot = s_onehot + self.weight / s_onehot.shape[-1]
s_onehot = s_onehot / ops.ReduceSum(keep_dims=True)(s_onehot, -1)
loss = -(s_onehot * log_probs).sum(-1)
diff --git a/MindSPONGE/applications/research/ProteinMPNN/train.py b/MindSPONGE/applications/research/ProteinMPNN/train.py
index 7e432acbdee3671875a0d8c2ef4d2f1184a0c4c0..21ce401e39a6465c799443c451351abef29d8c07 100644
--- a/MindSPONGE/applications/research/ProteinMPNN/train.py
+++ b/MindSPONGE/applications/research/ProteinMPNN/train.py
@@ -181,5 +181,5 @@ if __name__ == "__main__":
argparser.add_argument('--device_target', help='device target', type=str, default="Ascend")
args_ = argparser.parse_args()
- ms.set_context(device_target='GPU', device_id=args_.device_id, mode=ms.GRAPH_MODE)
+ ms.set_context(device_target=args_.device_target, device_id=args_.device_id, mode=ms.GRAPH_MODE)
main(args_)
diff --git a/MindSPONGE/docs/api/api_python/cell/mindsponge.cell.Attention.rst b/MindSPONGE/docs/api/api_python/cell/mindsponge.cell.Attention.rst
index 51366d6f2f94d131c90caa7a564b14dfa39fc3c0..92de1fe10cd795809176050357d13578003eea75 100644
--- a/MindSPONGE/docs/api/api_python/cell/mindsponge.cell.Attention.rst
+++ b/MindSPONGE/docs/api/api_python/cell/mindsponge.cell.Attention.rst
@@ -24,7 +24,7 @@ mindsponge.cell.Attention
- **m_data** (Tensor) - shape为 :math:`(batch\_size, value\_seq_length, m\_data_dim)` 的key和value Tensor,其中value_seq_length是value向量的序列长度。
- **attention_mask** (Tensor) - 注意力矩阵的mask。shape为 :math:`(batch\_size, num\_heads, query\_seq_length, value\_seq_length)`。
- **index** (Tensor) - 在while循环中的索引,仅在有while控制流时使用。默认值: ``None``。
- - **nonbatched_bias** (Tensor) - attention矩阵中无batch维的偏置。shape为 :math:`(num\_heads, query\_seq_length, value_seq_length)`。默认值: ``None``。
+ - **nonbatched_bias** (Tensor) - attention矩阵中无batch维的偏置。shape为 :math:`(num\_heads, query\_seq_length, value\_seq_length)`。默认值: ``None``。
输出:
Tensor。Attention层的输出tensor,shape是 :math:`(batch\_size, query\_seq_length, hidden\_size)`。
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python/common/mindsponge.common.get_pdb_info.rst b/MindSPONGE/docs/api/api_python/common/mindsponge.common.get_pdb_info.rst
index 347cfb79d7c0ec4167f24bcdbfcb28a9821082ac..eeed4b18308b9579ee661e9bfc1a3d6bc13c2142 100644
--- a/MindSPONGE/docs/api/api_python/common/mindsponge.common.get_pdb_info.rst
+++ b/MindSPONGE/docs/api/api_python/common/mindsponge.common.get_pdb_info.rst
@@ -25,6 +25,3 @@ mindsponge.common.get_pdb_info
- **atom14_alt_gt_exists** (numpy.array) 按照稠密编码方式编码,对应手性蛋白全原子掩码。shape :math:`(N_{res}, 14)` 。
- **atom14_atom_is_ambiguous** (numpy.array) 由于部分氨基酸结构具有局部对称性,其对称原子编码可调换,具体原子参考 `common.residue_atom_renaming_swaps` 该特征记录了原子不确定的编码位置。shape :math:`(N_{res}, 14)` 。
- **residue_index** (numpy.array) 蛋白质序列编码index信息,大小从1到 :math:`N_{res}` 。shape :math:`(N_{res}, )` 。
-
- 符号:
- - :math:`N_{res}` - 蛋白质中氨基酸个数,按蛋白质一级序列排列。
diff --git a/MindSPONGE/docs/api/api_python/control/mindsponge.control.Controller.rst b/MindSPONGE/docs/api/api_python/control/mindsponge.control.Controller.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a1aca91febed3f61fa94ef5f9a77b0042ec62326
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python/control/mindsponge.control.Controller.rst
@@ -0,0 +1,140 @@
+mindsponge.control.Controller
+=============================
+
+.. py:class:: mindsponge.control.Controller(system: Molecule, control_step: int = 1)
+
+ MindSPONGE控制器模块中的基类。
+ `Controller` 在 `Updater` 中使用,用于控制仿真过程中的七个变量,包括坐标、速度、力、能量、动力学、维里和PBC box。
+
+ 参数:
+ - **system** (Molecule) - 模拟系统。
+ - **control_step** (int) - 控制器执行的步骤间隔。默认值:1。
+
+ 输入:
+ - **coordinate** (Tensor) - shape为 `(B, A, D)` 的Tensor。数据类型是float。
+ - **velocity** (Tensor) - shape为 `(B, A, D)` 的Tensor。数据类型是float。
+ - **force** (Tensor) - shape为 `(B, A, D)` 的Tensor。数据类型是float。
+ - **energy** (Tensor) - shape为 `(B, 1)` 的Tensor。数据类型是float。
+ - **kinetics** (Tensor) - shape为 `(B, D)` 的Tensor。数据类型是float。
+ - **virial** (Tensor) - shape为 `(B, D)` 的Tensor。数据类型是float。
+ - **pbc_box** (Tensor) - shape为 `(B, D)` 的Tensor。数据类型是float。
+ - **step** (int) - 模拟步数。默认值:0
+
+ 输出:
+ - 坐标,shape为 `(B, A, D)` 的Tensor。数据类型是float。
+ - 速度,shape为 `(B, A, D)` 的Tensor。数据类型是float。
+ - 力,shape为 `(B, A, D)` 的Tensor。数据类型是float。
+ - 能量,shape为 `(B, 1)` 的Tensor。数据类型是float。
+ - 动力学,shape为 `(B, D)` 的Tensor。数据类型是float。
+ - 维里,shape为 `(B, D)` 的Tensor。数据类型是float。
+ - 周期性边界条件PBC box,shape为 `(B, D)` 的Tensor。数据类型是float。
+
+ 符号:
+ - **B** - Batch size。
+ - **A** - 原子总数。
+ - **D** - 仿真系统的空间维度。通常是3。
+
+ .. py:method:: boltzmann()
+
+ 获取当前单元中的玻尔兹曼常数。
+
+ 返回:
+ float。当前单元中的玻尔兹曼常数。
+
+ .. py:method:: set_time_step(dt)
+
+ 设置模拟单步时间。
+
+ 参数:
+ - **dt** (float) - 单步时长。
+
+ .. py:method:: set_degrees_of_freedom(dofs)
+
+ 设置自由度(DOFs)。
+
+ 参数:
+ - **dofs** (int) - 自由度。
+
+ .. py:method:: update_coordinate(coordinate)
+
+ 更新模拟系统的坐标。
+
+ 参数:
+ - **coordinate** (Tensor) - 原子坐标的Tensor。shape为 `(B, A, D)` 。数据类型为float。
+
+ 返回:
+ Tensor。更新后的坐标的Tensor,shape和数据类型与原来一致。
+
+ .. py:method:: update_pbc_box(pbc_box)
+
+ 更新周期性边界条件box的参数。
+
+ 参数:
+ - **pbc_box** (Tensor) - 周期性边界条件box的Tensor。shape为 `(B, D)` 。数据类型为float。
+
+ 返回:
+ Tensor。更新后的PBC box的Tensor,shape和数据类型与原来的 `pbc_box` 一致。
+
+ .. py:method:: get_kinetics(velocity)
+
+ 根据速度计算动力学。
+
+ 参数:
+ - **velocity** (Tensor) - 原子速度的Tensor。shape为 `(B, A, D)` 。数据类型为float。
+
+ 返回:
+ Tensor,动力学。shape为 `(B, D)` 。数据类型为float。
+
+ .. py:method:: get_temperature(kinetics=None)
+
+ 根据速度计算温度。
+
+ 参数:
+ - **kinetics** (Tensor) - 动力学的Tensor。shape为 `(B, D)` 。数据类型为float。默认值:"None"。
+
+ 返回:
+ Tensor,温度。shape为 `(B)` 。数据类型为float。
+
+ .. py:method:: get_volume(pbc_box)
+
+ 根据周期性边界条件box计算容积。
+
+ 参数:
+ - **pbc_box** (Tensor) - 用于计算容积的周期性边界条件。shape为 `(B, D)` 。数据类型为float。
+
+ 返回:
+ Tensor,容积。shape为 `(B)` 。数据类型为float。
+
+ .. py:method:: get_pressure(kinetics, virial, pbc_box)
+
+ 根据动力学,维里和周期性边界条件计算压力。
+
+ 参数:
+ - **kinetics** (Tensor) - 动力学的Tensor。shape为 `(B, D)` 。数据类型为float。
+ - **virial** (Tensor) - 维里的Tensor。shape为 `(B, D)` 。数据类型为float。
+ - **pbc_box** (Tensor) - 周期性边界条件box的Tensor。shape为 `(B, D)` 。数据类型为float。
+
+ 返回:
+ Tensor。根据动力学,维里,周期性边界条件box计算压力。shape为 `(B, D)` 。数据类型为float。
+
+ .. py:method:: get_com(coordinate, keepdims=True)
+
+ 计算质心坐标。
+
+ 参数:
+ - **coordinate** (Tensor) - 原子坐标的Tensor。shape为 `(B, A, D)` 。数据类型为float。
+ - **keepdims** (bool) - 如果为True,在结果中保持第二根轴对应的维度且长度为1。默认值:True。
+
+ 返回:
+ Tensor。质心坐标。shape为 `(B, A, D)` 或 `(B, D)` 。数据类型为float。
+
+ .. py:method:: get_com_velocity(velocity, keepdims=True)
+
+ 计算质心速度。
+
+ 参数:
+ - **velocity** (Tensor) - 速度的Tensor。shape为 `(B, A, D)` 。数据类型为float。
+ - **keepdims** (bool) - 如果为True,在结果中保持第二根轴对应的维度且长度为1。默认值:True。
+
+ 返回:
+ Tensor。质心速度。shape为 `(B, A, D)` 或 `(B, D)` 。数据类型为float。
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python/core/mindsponge.core.RunOneStepCell.rst b/MindSPONGE/docs/api/api_python/core/mindsponge.core.RunOneStepCell.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4cfa448a9b771428858957a14819f97cc2fa59c3
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python/core/mindsponge.core.RunOneStepCell.rst
@@ -0,0 +1,161 @@
+mindsponge.core.RunOneStepCell
+==============================
+
+.. py:class:: mindsponge.core.RunOneStepCell(energy: WithEnergyCell = None, force: WithForceCell = None, optimizer: Optimizer = None, steps: int = 1, sens: float = 1.0)
+
+ 运行一步模拟的神经网络层。这一层包裹了 `energy` , `force` 和 `optimizer` 。在construct函数里将会生成一张反向图来更新仿真系统的原子坐标。
+
+ 参数:
+ - **energy** (WithEnergyCell) - 包含了有势能函数的模拟系统的神经网络层。默认值:"None"。该神经网络层用于计算并返回系统在当前坐标处的势能值。
+ - **force** (WithForceCell) - 包含了有原子力函数的模拟系统的神经网络层。默认值:"None"。该神经网络层用于计算并返回系统在当前坐标处的力值。
+ - **optimizer** (Optimizer) - 模拟的优化器。默认值:"None"。
+ - **steps** (int) - 模拟的步数。默认值:1。
+ - **sens** (float) - 作为反向传播的输入要填充的缩放数。默认值:1.0。
+
+ 输入:
+ - **\*inputs** (Tuple(Tensor)) - `WithEnergyCell` 的输入Tensors的tuple。
+
+ 输出:
+ - 整体的势能,shape为 `(B, 1)` 的Tensor,数据类型为float。
+ - 原子力,shape为 `(B, A, D)` 的Tensor,数据类型为float。
+
+ .. py:method:: neighbour_list_pace()
+
+ 更新邻居列表的间隔步数。
+
+ 返回:
+ int,更新邻居列表的间隔步数。
+
+ .. py:method:: energy_cutoff()
+
+ `WithEnergyCell` 中邻居列表的截断距离。
+
+ 返回:
+ Tensor, `WithEnergyCell` 中邻居列表的截断距离。
+
+ .. py:method:: force_cutoff()
+
+ `WithForceCell` 中邻居列表的截断距离。
+
+ 返回:
+ Tensor, `WithForceCell` 中邻居列表的截断距离。
+
+ .. py:method:: length_unit()
+
+ 长度单位。
+
+ 返回:
+ str,长度单位。
+
+ .. py:method:: energy_unit()
+
+ 能量单位。
+
+ 返回:
+ str,能量单位。
+
+ .. py:method:: num_energies()
+
+ 能量项 :math:`U` 的数量。
+
+ 返回:
+ int,能量项的数量。
+
+ .. py:method:: energy_names()
+
+ 能量项的名字。
+
+ 返回:
+ list[str],能量项的名字列表。
+
+ .. py:method:: bias_names()
+
+ 偏置势能的名字。
+
+ 返回:
+ list[str],偏置势能的名字列表。
+
+ .. py:method:: num_biases()
+
+ 偏置势能 :math:`V` 的数量。
+
+ 返回:
+ int,偏置势能的数量。
+
+ .. py:method:: energies()
+
+ 势能组成部分的Tensor。
+
+ 返回:
+ Tensor,shape为 `(B, U)` ,数据类型为float。
+
+ .. py:method:: biases()
+
+ 偏置势的组成部分的Tensor。
+
+ 返回:
+ Tensor,shape为 `(B, V)` ,数据类型为float。
+
+ .. py:method:: bias()
+
+ 整个偏置势的Tensor。
+
+ 返回:
+ Tensor,shape为 `(B, 1)` ,数据类型为float。
+
+ .. py:method:: bias_function()
+
+ 偏置势函数的网络层。
+
+ 返回:
+ Cell,偏置势函数。
+
+ .. py:method:: update_neighbour_list()
+
+ 更新邻居列表。
+
+ .. py:method:: update_bias(step)
+
+ 更新偏置势。
+
+ 参数:
+ - **step** (int) - 更新偏置势的仿真step。
+
+ .. py:method:: update_wrapper(step)
+
+ 更新能量包装器。
+
+ 参数:
+ - **step** (int) - 更新能量包装器的仿真step。
+
+ .. py:method:: update_modifier(step)
+
+ 更新力修饰器。
+
+ 参数:
+ - **step** (int) - 更新力修饰器的仿真step。
+
+ .. py:method:: set_pbc_grad(value)
+
+ 设定是否计算周期性边界条件箱的梯度。
+
+ 参数:
+ - **value** (bool) - 用于判断是否计算周期性边界条件箱的梯度的标志符。
+
+ .. py:method:: set_steps(step)
+
+ 设置JIT的步数。
+
+ 参数:
+ - **steps** (int) - JIT的步数。
+
+ .. py:method:: run_one_step(*inputs)
+
+ 运行单步模拟。
+
+ 参数:
+ - **\*inputs** (Tuple(Tensor)) - `WithEnergyCell` 的输入Tensors的tuple。
+
+ 返回:
+ - 整体的势能,shape为 `(B, 1)` 的Tensor,数据类型为float。
+ - 原子力,shape为 `(B, A, D)` 的Tensor,数据类型为float。
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python/core/mindsponge.core.WithEnergyCell.rst b/MindSPONGE/docs/api/api_python/core/mindsponge.core.WithEnergyCell.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e127d570948d1b77d9bdf699ba5086f04142d3c0
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python/core/mindsponge.core.WithEnergyCell.rst
@@ -0,0 +1,159 @@
+mindsponge.core.WithEnergyCell
+==============================
+
+.. py:class:: mindsponge.core.WithEnergyCell(system: Molecule, potential: PotentialCell, bias: Union[Bias, List[Bias]] = None, cutoff: float = None, neighbour_list: NeighbourList = None, wrapper: EnergyWrapper = None)
+
+ 用势能函数封装仿真系统的神经网络层。
+ 该神经网络层用于计算并返回系统在当前坐标处的势能值。
+
+ 参数:
+ - **system** (Molecule) - 仿真系统。
+ - **potential** (PotentialCell) - 势能函数层。
+ - **bias** (Union[Bias, List[Bias]]) - 偏差势能函数层。默认值:"None"。
+ - **cutoff** (float) - 邻居列表的截断距离。如果为None,则将其赋值为势能的截止值。默认值:"None"。
+ - **neighbour_list** (NeighbourList) - 邻居列表。默认值:"None"。
+ - **wrapper** (EnergyWrapper) - 包裹和处理势和偏差的网络。默认值:"None"。
+
+ 输入:
+ - **\*inputs** (Tuple(Tensor)) - `WithEnergyCell` 的输入Tensors的tuple。
+
+ 输出:
+ 整个系统的势能, shape为 `(B, 1)` 的Tensor。数据类型为float。
+
+ .. py:method:: cutoff()
+
+ 邻居列表的截断距离。
+
+ 返回:
+ Tensor,截断距离。
+
+ .. py:method:: neighbour_list_pace()
+
+ 邻居列表的更新步长。
+
+ 返回:
+ int,更新步长。
+
+ .. py:method:: length_unit()
+
+ 长度单位。
+
+ 返回:
+ str,长度单位。
+
+ .. py:method:: energy_unit()
+
+ 能量单位。
+
+ 返回:
+ str,能量单位。
+
+ .. py:method:: num_energies()
+
+ 能量项 :math:`U` 的数量。
+
+ 返回:
+ int,能量项的数量。
+
+ .. py:method:: num_biases()
+
+ 偏置势能 :math:`V` 的数量。
+
+ 返回:
+ int,偏置势能的数量。
+
+ .. py:method:: energy_names()
+
+ 能量项的名字。
+
+ 返回:
+ list[str],能量项的名字列表。
+
+ .. py:method:: bias_names()
+
+ 偏置势能的名字。
+
+ 返回:
+ list[str],偏置势能的名字列表。
+
+ .. py:method:: energies()
+
+ 势能分量的Tensor。
+
+ 返回:
+ 势能分量的Tensor,shape为 `(B, U)` ,数据类型为float。
+
+ .. py:method:: biases()
+
+ 偏置势分量的Tensor。
+
+ 返回:
+ 偏置势分量的Tensor。shape为 `(B, V)` ,数据类型为float。
+
+ .. py:method:: bias()
+
+ 整体偏置势的Tensor。
+
+ 返回:
+ Tensor,shape为 `(B, 1)` ,数据类型为float。
+
+ .. py:method:: bias_pace(index=0)
+
+ 偏置势的更新频率。
+
+ 参数:
+ - **index** (int) - 偏置势的索引。默认值:0。
+
+ 返回:
+ int,更新频率。
+
+ .. py:method:: set_pbc_grad(grad_box)
+
+ 设置是否计算PBC box的梯度。
+
+ 参数:
+ - **grad_box** (bool) - 是否计算PBC box的梯度。
+
+ .. py:method:: update_neighbour_list()
+
+ 更新邻居列表。
+
+ 返回:
+ - neigh_idx,系统中每个原子邻近原子的索引。shape为 `(B, A, N)` 的Tensor,数据类型为int。
+ - neigh_mask,neigh_idx的掩码。shape为 `(B, A, N)` 的Tensor,数据类型为bool。
+
+ .. py:method:: update_bias(step)
+
+ 更新偏置势。
+
+ 参数:
+ - **step** (int) - 当前仿真步数,当步数整除更新频率余数为0时,更新偏置势。
+
+ .. py:method:: update_wrapper(step)
+
+ 更新能量包装器。
+
+ 参数:
+ - **step** (int) - 当前仿真步数,当步数整除更新频率余数为0时,更新能量包装器。
+
+ .. py:method:: get_neighbour_list()
+
+ 获取邻居列表。
+
+ 返回:
+ - neigh_idx,系统中每个原子邻近原子的索引。shape为 `(B, A, N)` 的Tensor,数据类型为int。
+ - neigh_mask,neigh_idx的掩码。shape为 `(B, A, N)` 的Tensor,数据类型为bool。
+
+ .. py:method:: calc_energies()
+
+ 计算势能的能量项。
+
+ 返回:
+ 能量项,shape为 `(B, U)` 的Tensor。数据类型为float。
+
+ .. py:method:: calc_biases()
+
+ 计算偏置势项。
+
+ 返回:
+ 偏置势项,shape为 `(B, V)` 的Tensor。数据类型为float。
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python/metrics/mindsponge.metrics.frame_aligned_point_error_map.rst b/MindSPONGE/docs/api/api_python/metrics/mindsponge.metrics.frame_aligned_point_error_map.rst
index 9550f12bde046c62efa183a4f3424537f8aa8d63..08da6c2510609249bd6650561a9fbf1e83474717 100644
--- a/MindSPONGE/docs/api/api_python/metrics/mindsponge.metrics.frame_aligned_point_error_map.rst
+++ b/MindSPONGE/docs/api/api_python/metrics/mindsponge.metrics.frame_aligned_point_error_map.rst
@@ -6,7 +6,7 @@ mindsponge.metrics.frame_aligned_point_error_map
在不同的局部坐标系下计算两个结构的原子位置误差,与 `frame_aligned_point_error` 函数相似,区别在于带批处理逻辑,同时计算多组局部坐标系与真实结构局部坐标系之间的误差,针对每组局部坐标系分别返回一个损失函数值,且只考虑 :math:`C\alpha` 原子,计算逻辑参考 `frame_aligned_point_error`。
参数:
- - **pred_frames** (list) - 预测的蛋白质刚体变换组对应局部坐标系,二维数组,数组的第一个元素是长度为9的tensor的list,代表局部坐标系相对于全局坐标系的旋转矩阵;第二个元素是长度为3的tensor的list,代表局部坐标系相对于全局坐标系的平移矩阵,所有tensor的shape均为 :math:`(N_{recycle}, N\_res)` ,其中 :math:`N_{recycle}` 是Structure模块中FoldIteration的循环次数。 :math:`N_{res}` 是蛋白质中的残基数目。
+ - **pred_frames** (list) - 预测的蛋白质刚体变换组对应局部坐标系,二维数组,数组的第一个元素是长度为9的tensor的list,代表局部坐标系相对于全局坐标系的旋转矩阵;第二个元素是长度为3的tensor的list,代表局部坐标系相对于全局坐标系的平移矩阵,所有tensor的shape均为 :math:`(N_{recycle}, N_{res})` ,其中 :math:`N_{recycle}` 是Structure模块中FoldIteration的循环次数。 :math:`N_{res}` 是蛋白质中的残基数目。
- **target_frames** (list) - 预测的蛋白质刚体变换组对应局部坐标系,也是二维list,shape与 `pred_frames` 一致,为 :math:`(N_{res},)`。
- **frames_mask** (Tensor) - 局部坐标系的mask,shape为 :math:`(N_{res},)` 。
- **pred_positions** (list) - 预测的 :math:`C\alpha` 原子的坐标,长度为3的tensor的一维数组,tensor的shape为 :math:`(N_{recycle}, N_{res},)` 。
diff --git a/MindSPONGE/docs/api/api_python/mindsponge.control.rst b/MindSPONGE/docs/api/api_python/mindsponge.control.rst
new file mode 100644
index 0000000000000000000000000000000000000000..341a115f6c33fbcd969c759bc3d1cf37e6f96248
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python/mindsponge.control.rst
@@ -0,0 +1,9 @@
+mindsponge.control
+==================
+
+.. mscnplatformautosummary::
+ :toctree: control
+ :nosignatures:
+ :template: classtemplate.rst
+
+ mindsponge.control.Controller
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python/mindsponge.core.rst b/MindSPONGE/docs/api/api_python/mindsponge.core.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a16c5cd8f96981024890a61eb0e6e9744b1f7786
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python/mindsponge.core.rst
@@ -0,0 +1,10 @@
+mindsponge.core
+===============
+
+.. mscnplatformautosummary::
+ :toctree: core
+ :nosignatures:
+ :template: classtemplate.rst
+
+ mindsponge.core.RunOneStepCell
+ mindsponge.core.WithEnergyCell
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python/mindsponge.optimizer.rst b/MindSPONGE/docs/api/api_python/mindsponge.optimizer.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fc01996ca0092a95b82ece7f5d1240dd64732ddb
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python/mindsponge.optimizer.rst
@@ -0,0 +1,9 @@
+mindsponge.optimizer
+====================
+
+.. mscnplatformautosummary::
+ :toctree: optimizer
+ :nosignatures:
+ :template: classtemplate.rst
+
+ mindsponge.optimizer.Updater
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python/mindsponge.potential.rst b/MindSPONGE/docs/api/api_python/mindsponge.potential.rst
new file mode 100644
index 0000000000000000000000000000000000000000..9d98b216c12425f2a3146473ae65e325865457b5
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python/mindsponge.potential.rst
@@ -0,0 +1,10 @@
+mindsponge.potential
+====================
+
+.. mscnplatformautosummary::
+ :toctree: potential
+ :nosignatures:
+ :template: classtemplate.rst
+
+ mindsponge.potential.EnergyCell
+ mindsponge.potential.potential
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python/mindsponge.system.rst b/MindSPONGE/docs/api/api_python/mindsponge.system.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5df3316437275f03ce06ba31f11200f7e689594d
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python/mindsponge.system.rst
@@ -0,0 +1,10 @@
+mindsponge.system
+=================
+
+.. mscnplatformautosummary::
+ :toctree: system
+ :nosignatures:
+ :template: classtemplate.rst
+
+ mindsponge.system.Molecule
+ mindsponge.system.Residue
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python/optimizer/mindsponge.optimizer.Updater.rst b/MindSPONGE/docs/api/api_python/optimizer/mindsponge.optimizer.Updater.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2c4023d7649772593d2ea8a2520f1ffb9ec5926f
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python/optimizer/mindsponge.optimizer.Updater.rst
@@ -0,0 +1,202 @@
+mindsponge.optimizer.Updater
+============================
+
+.. py:class:: mindsponge.optimizer.Updater(system: Molecule, controller: Union[Controller, List[Controller]] = None, time_step: float = 1e-3, velocity: Union[Tensor, ndarray, List[float]] = None, weight_decay: float = 0.0, loss_scale: float = 1.0)
+
+ MindSPONGE更新器的基类。是MindSpore中 `Optimizer` 的特殊子类。 `Updater` 更新仿真系统中的原子坐标。原子坐标的更新要求原子受力和原子速度。力是从外界传递而来,速度是 `Updater` 自己的参数。
+ 当使用周期性边界条件的时候, `Updater` 也能够通过仿真系统的维里更新周期性边界条件箱的尺寸。
+ 在通过一系列的 `Controller` 的仿真模拟中, `Updater` 控制着七个变量的值,分别是:坐标、速度、力、能量、动力学、维里和周期性边界条件箱。如果传入超过一个 `Controller` ,它们将按照队列顺序进行工作。
+
+ 参数:
+ - **system** (Molecule) - 模拟系统。
+ - **controller** (Union[Controller, List[Controller]]) - 控制器或控制器列表来控制模拟系统中的七个变量(坐标、速度、力、能量、动力学、维里和周期性边界条件箱)。默认值:"None"。
+ - **time_step** (float) - 单步时间。默认值:1e-3。
+ - **velocity** (Union[Tensor, ndarray, List[float]]) - 原子速度的array,shape为 `(A, D)` 或 `(B, A, D)`,数据类型为float。默认值:"None"。
+ - **weight_decay** (float) - 权重衰减值。默认值:0.0。
+ - **loss_scale** (float) - 梯度缩放系数。默认值:1.0。
+
+ 输入:
+ - **energy** (Tensor) - 系统的能量。 shape为 `(B, A, D)` 的Tensor。数据类型为float。
+ - **force** (Tensor) - 系统的力。 shape为 `(B, A, D)` 的Tensor。数据类型为float。
+ - **virial** (Tensor) - 系统的维里。 shape为 `(B, A, D)` 的Tensor。数据类型为float。默认值:"None"。
+
+ 输出:
+ bool,是否成功完成当前优化单步并且移动到下一步。
+
+ 符号:
+ - **B** - Batch size。
+ - **A** - 原子总数。
+ - **D** - 模拟系统的维度,一般为3。
+
+ .. py:method:: boltzmann()
+
+ 当前单位下的玻尔兹曼常数。
+
+ 返回:
+ float,当前单位下的玻尔兹曼常数。
+
+ .. py:method:: press_unit_scale()
+
+ 压力的参考值。
+
+ 返回:
+ float,压力的参考值。
+
+ .. py:method:: set_step(step=0)
+
+ 设置系统的当前步数。
+
+ 参数:
+ - **step** (int) - 系统的当前步数。默认值:0。
+
+ .. py:method:: set_degrees_of_freedom(dofs)
+
+ 设置系统的自由度。
+
+ 参数:
+ - **dofs** (int) - 自由度。
+
+ .. py:method:: update_coordinate(coordinate, success=True)
+
+ 更新坐标的参数。
+
+ 参数:
+ - **coordinate** (Tensor) - 原子的位置坐标的Tensor。数据类型为float。
+ - **success** (bool) - 判断是否更新坐标。默认值:True。
+
+ 返回:
+ bool,是否成功更新了坐标的参数。
+
+ .. py:method:: update_pbc_box(pbc_box, success=True)
+
+ 更新周期性边界条件箱的参数。
+
+ 参数:
+ - **pbc_box** (Tensor) - 周期性边界条件box的Tensor。数据类型为float。
+ - **success** (bool) - 判断是否更新周期性边界条件箱的参数。默认值:True。
+
+ 返回:
+ bool,是否成功更新了周期性边界条件箱的参数。
+
+ .. py:method:: update_velocity(velocity, success=True)
+
+ 更新速度参数。
+
+ 参数:
+ - **velocity** (Tensor) - 原子速度的Tensor。数据类型为float。
+ - **success** (bool) - 判断是否更新速度参数。默认值:True。
+
+ 返回:
+ bool,是否成功更新了速度参数。
+
+ .. py:method:: update_kinetics(kinetics, success=True)
+
+ 更新动力学参数。
+
+ 参数:
+ - **kinetics** (Tensor) - 动力学的Tensor。数据类型为float。
+ - **success** (bool) - 判断是否更新动力学参数。默认值:True。
+
+ 返回:
+ bool。是否成功更新了动力学参数。
+
+ .. py:method:: update_temperature(temperature, success=True)
+
+ 更新温度参数。
+
+ 参数:
+ - **temperature** (Tensor) - 温度的Tensor。数据类型为float。
+ - **success** (bool) - 判断是否更新温度参数。默认值:True。
+
+ 返回:
+ bool。是否成功更新了温度参数。
+
+ .. py:method:: update_virial(virial, success=True)
+
+ 更新维里参数。
+
+ 参数:
+ - **virial** (Tensor) - 维里的Tensor。数据类型为float。
+ - **success** (bool, 可选) - 判断是否更新维里参数。默认值:True。
+
+ 返回:
+ bool。是否成功更新了维里参数。
+
+ .. py:method:: update_pressure(pressure, success=True)
+
+ 更新压力参数。
+
+ 参数:
+ - **pressure** (Tensor) - 压力的Tensor。数据类型为float。
+ - **success** (bool, 可选) - 判断是否更新压力参数。默认值:True。
+
+ 返回:
+ bool。是否成功更新了压力参数。
+
+ .. py:method:: get_velocity()
+
+ 获取速度。
+
+ 返回:
+ Tensor,系统中原子的速度。
+
+ .. py:method:: get_kinetics(velocity)
+
+ 获取动力学。
+
+ 参数:
+ - **velocity** (Tensor) - 原子速度的Tensor,数据类型为float。
+
+ 返回:
+ Tensor,系统中的动力学。
+
+ .. py:method:: get_temperature(kinetics=None)
+
+ 获取温度。
+
+ 参数:
+ - **kinetics** (Tensor) - 动力学的Tensor,数据类型为float。默认值:"None"。
+
+ 返回:
+ Tensor,系统的温度。
+
+ .. py:method:: get_pressure(kinetics, virial, pbc_box)
+
+ 获得压力。
+
+ 参数:
+ - **kinetics** (Tensor) - 动力学的Tensor,数据类型为float。默认值:"None"。
+ - **virial** (Tensor) - 维里的Tensor,数据类型为float。默认值:"None"。
+ - **pbc_box** (Tensor) - 周期性边界条件箱的Tensor,数据类型为float。默认值:"None"。
+
+ 返回:
+ Tensor,系统的压力。
+
+ .. py:method:: get_dt()
+
+ 获取当前单步的学习率。
+
+ 返回:
+ float。当前单步的学习率。
+
+ .. py:method:: next_step(success=True)
+
+ 完成当前优化step并且进行到下一个step。
+
+ 参数:
+ - **success** (bool) - 是否完成当前优化step并且移动到下一步。默认值:True。
+
+ 返回:
+ bool,是否成功完成当前优化step并且移动到下一步。
+
+ .. py:method:: decay_and_scale_grad(force, virial=None)
+
+ 对力和维里进行权重衰减和梯度标度。
+
+ 参数:
+ - **force** (Tensor) - 力的Tensor,数据类型为float。
+ - **virial** (Tensor) - 维里的Tensor,数据类型为float。默认值:"None"。
+
+ 返回:
+ - Tensor,权重衰减和梯度标度之后的力。
+ - Tensor,权重衰减和梯度标度之后的维里。如果pbc_box是None,输出维里与输入保持一致。
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python/potential/mindsponge.potential.EnergyCell.rst b/MindSPONGE/docs/api/api_python/potential/mindsponge.potential.EnergyCell.rst
new file mode 100644
index 0000000000000000000000000000000000000000..162cfafe415e4545b441d3bd22145def4f435c7c
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python/potential/mindsponge.potential.EnergyCell.rst
@@ -0,0 +1,85 @@
+mindsponge.potential.EnergyCell
+===============================
+
+.. py:class:: mindsponge.potential.EnergyCell(name: str = 'energy', length_unit: str = 'nm', energy_unit: str = 'kj/mol', use_pbc: bool = None)
+
+ 能量项的基础类。 `EnergyCell` 通常被用作传统力场中单独的能量项的一个基类。力场参数通常有单位,因此作为能量项的 `EnergyCell` 的单位必须与力场参数的单位保持一致,而不是与全局单位相同。
+
+ 参数:
+ - **name** (str) - 能量的名称。默认值:"energy"。
+ - **length_unit** (str) - 长度单位。如果是None的话,与全局长度单位保持一致。默认值:"nm"。
+ - **energy_unit** (str) - 能量单位。如果是None的话,与全局能量单位保持一致。默认值:"kj/mol"。
+ - **use_pbc** (bool) - 是否使用周期性边界条件。默认值:"None"。
+
+ 输出:
+ Tensor。能量,shape为 `(B, 1)` ,数据类型为float。
+
+ .. py:method:: name()
+
+ 能量的名称。
+
+ 返回:
+ str,能量的名称。
+
+ .. py:method:: use_pbc()
+
+ 判断是否使用周期性边界条件。
+
+ 返回:
+ bool,返回一个标志来判断是否使用了周期性边界条件。
+
+ .. py:method:: length_unit()
+
+ 长度单位。
+
+ 返回:
+ str,长度单位。
+
+ .. py:method:: energy_unit()
+
+ 能量单位。
+
+ 返回:
+ str,能量单位。
+
+ .. py:method:: set_input_unit(length_unit)
+
+ 设置输入坐标的长度单位。
+
+ 参数:
+ - **length_unit** (Union[str, Units, Length]) - 输入坐标的长度单位。
+
+ .. py:method:: set_cutoff(cutoff, unit=None)
+
+ 设置截断距离。
+
+ 参数:
+ - **cutoff** (float) - 截断距离。
+ - **unit** (str) - 长度单位。默认值:"None"。
+
+ .. py:method:: set_pbc(use_pbc)
+
+ 设置是否使用周期性边界条件。
+
+ 参数:
+ - **use_pbc** (bool) - 是否使用周期性边界条件。
+
+ .. py:method:: convert_energy_from(unit)
+
+ 将能量数值从外部单位换算到内部单位。
+
+ 参数:
+ - **unit** (str) - 能量的单位。
+
+ 返回:
+ float,从外部单位换算到内部单位的能量数值。
+
+ .. py:method:: convert_energy_to(unit)
+
+ 将能量数值从内部单位换算到外部单位。
+
+ 参数:
+ - **unit** (str) - 能量的单位。
+
+ 返回:
+ float,从内部单位换算到外部单位的能量数值。
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python/potential/mindsponge.potential.PotentialCell.rst b/MindSPONGE/docs/api/api_python/potential/mindsponge.potential.PotentialCell.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d935ba2aecd470e71450b147f130cb658b0c42e2
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python/potential/mindsponge.potential.PotentialCell.rst
@@ -0,0 +1,66 @@
+mindsponge.potential.PotentialCell
+==================================
+
+.. py:class:: mindsponge.potential.PotentialCell(num_energies: int = 1, energy_names: Union[str, List[str]] = 'potential', length_unit: str = None, energy_unit: str = None, use_pbc: bool = None, name: str = 'potential')
+
+ 势能的基类。
+
+ `PotentialCell` 是 `EnergyCell` 的一个特殊子类。普通的 `EnergyCell` 只输出一个能量项,所以 `EnergyCell` 返回一个shape为 `(B, 1)` 的Tensor。
+ `PotentialCell` 能够返回多个能量项,所以它的返回值是shape为 `(B, E)` 的Tensor。除此之外,默认情况下, 'PotentialCell' 的单位等于全局单位。
+
+ 参数:
+ - **num_energies** (int) - 输出的能量项的数量。默认值:1
+ - **energy_names** (Union[str, List[str]]) - 能量项的名字。默认值:"potential"。
+ - **length_unit** (str) - 长度单位。如果未被给出,则使用全局长度单位。默认值:"None"。
+ - **energy_unit** (str) - 能量单位。如果未被给出,则使用全局能量单位。默认值:"None"。
+ - **use_pbc** (bool) - 是否使用周期性边界条件。如果为None,则不使用周期性边界条件。默认值:"None"。
+ - **name** (str) - 能量的名字。默认值:"potential"。
+
+ 输入:
+ - **coordinates** (Tensor) - 系统中原子的位置坐标。shape为 `(B, A, D)` 的Tensor。数据类型为float。
+ - **neighbour_index** (Tensor) - 相邻原子的索引。shape为 `(B, A, N)` 的Tensor。数据类型为int。默认值:"None"。
+ - **neighbour_mask** (Tensor) - 相邻原子的掩码。shape为 `(B, A, N)` 的Tensor。数据类型为bool。默认值:"None"。
+ - **neighbour_vector** (Tensor) - 从中心原子指向相邻原子的向量。shape为 `(B, A, N, D)` 的Tensor。数据类型为float。默认值:"None"。
+ - **neighbour_distances** (Tensor) - 相邻原子之间的距离。shape为 `(B, A, N)` 的Tensor。数据类型为float。默认值:"None"。
+ - **pbc_box** (Tensor) - PBC box。shape为 `(B, D)` 的Tensor。数据类型为float。默认值:"None"。
+
+ 输出:
+ 势,shape为 `(B, E)` 的Tensor。数据类型为float。
+
+ .. py:method:: exclude_index()
+
+ 排除索引。
+
+ 返回:
+ Tensor。排除索引。
+
+ .. py:method:: num_energies()
+
+ 获取能量分量的数量。
+
+ 返回:
+ int,能量分量的数量。
+
+ .. py:method:: energy_names()
+
+ 获取能量名称的列表。
+
+ 返回:
+ list[str],能量名称的列表。
+
+ .. py:method:: set_exclude_index(exclude_index)
+
+ 设置排除索引。
+
+ 参数:
+ - **exclude_index** (Tensor) - 应该从非键相互作用中被排除的原子的索引。
+
+ 返回:
+ Tensor,排除索引。
+
+ .. py:method:: set_pbc(use_pbc=None)
+
+ 设置是否使用周期性边界条件PBC。
+
+ 参数:
+ - **use_pbc** (bool) - 是否使用周期性边界条件。
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python/system/mindsponge.system.Molecule.rst b/MindSPONGE/docs/api/api_python/system/mindsponge.system.Molecule.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3304a4077988c4d29792f0bb2ffc4f6131ec4937
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python/system/mindsponge.system.Molecule.rst
@@ -0,0 +1,349 @@
+mindsponge.system.Molecule
+==========================
+
+.. py:class:: mindsponge.system.Molecule(atoms: Union[List[Union[str, int]], ndarray] = None, atom_name: Union[List[str], ndarray] = None, atom_type: Union[List[str], ndarray] = None, atom_mass: Union[Tensor, ndarray, List[float]] = None, atom_charge: Union[Tensor, ndarray, List[float]] = None, atomic_number: Union[Tensor, ndarray, List[float]] = None, bond: Union[Tensor, ndarray, List[int]] = None, coordinate: Union[Tensor, ndarray, List[float]] = None, pbc_box: Union[Tensor, ndarray, List[float]] = None, template: Union[dict, str] = None, residue: Union[Residue, List[Residue]] = None, length_unit: str = None)
+
+ 分子体系层。
+
+ 参数:
+ - **atoms** (Union[List[Union[str, int]], ndarray]) - 体系中的原子。数据可以是原子名称的字符串,也可以是原子序号的int值。默认值: ``None`` 。
+ - **atom_name** (Union[List[str], ndarray]) - 原子名称字符串的array。默认值: ``None`` 。
+ - **atom_type** (Union[List[str], ndarray]) - 原子种类字符串的array。默认值: ``None`` 。
+ - **atom_mass** (Union[Tensor, ndarray, List[float]]) - 原子质量的array,类型为float,shape为 `(B, A)` 。默认值: ``None`` 。
+ - **atom_charge** (Union[Tensor, ndarray, List[float]]) - 原子电荷数的array,类型为float,shape为 `(B, A)` 。默认值: ``None`` 。
+ - **atomic_number** (Union[Tensor, ndarray, List[float]]) - 原子序数的array,类型为int,shape为 `(B, A)` 。默认值: ``None`` 。
+ - **bond** (Union[Tensor, ndarray, List[int]]) - 键连接的array,数据类型为int,shape为 `(B, b, 2)` 。默认值: ``None`` 。
+ - **coordinate** (Union[Tensor, ndarray, List[float]]) - 原子位置坐标 :math:`R` 的Tensor,shape为 :math:`(B, A, D)` ,数据类型为float。默认值: ``None`` 。
+ - **pbc_box** (Union[Tensor, ndarray, List[float]]) - 周期性边界条件的box,shape为 :math:`(B, D)` 或者 :math:`(1, D)` 。默认值: ``None`` 。
+ - **template** (Union[dict, str]) - 分子的模板。可以是一个MindSPONGE模板格式的字典,也可以是一个MindSPONGE模板文件的字符串。如果输入是一个字符串,该类会优先在MindSPONGE模板的构建路径下( `mindsponge.data.template` )搜索与输入同名的文件。默认值: ``None`` 。
+ - **residue** (Union[Residue, List[Residue]]) - 残基或残基列表。如果 `template` 不是 ``None`` 的话,只有模板里的残基会被使用。默认值: ``None`` 。
+ - **length_unit** (str) - 长度单位。如果为 ``None`` ,则使用全局长度单位。默认值: ``None`` 。
+
+ 输出:
+ - 坐标,shape为 `(B, A, D)` 的Tensor。数据类型为float。
+ - 周期性边界条件盒子,shape为 `(B, D)` 的Tensor。数据类型为float。
+
+ 符号:
+ - **B** - Batch size。
+ - **A** - 原子数量。
+ - **b** - 键数量。
+ - **D** - 模拟体系的维度,一般为3。
+
+ .. py:method:: add_residue(residue, coordinate=None)
+
+ 向当前分子系统增加残基。
+
+ 参数:
+ - **residue** (class) - 向系统中增加的残基的 `Residue` 类。
+ - **coordinate** (Tensor) - 输入残基的坐标。默认值: ``None`` 。
+
+ .. py:method:: append(system)
+
+ 向当前分子系统添加系统。
+
+ 参数:
+ - **system** (Molecule) - 添加进该分子系统的另一个分子系统。
+
+ .. py:method:: build_atom_charge()
+
+ 构建原子电荷数。
+
+ .. py:method:: build_atom_type()
+
+ 构建原子种类。
+
+ .. py:method:: build_space(coordinate, pbc_box=None)
+
+ 构建坐标系和周期性边界条件箱。
+
+ 参数:
+ - **coordinate** (Tensor) - 系统的初始坐标。如果是 ``None`` ,系统会随机生成一个坐标作为它的初始坐标。
+ - **pbc_box** (Tensor) - 系统的初始周期性边界条件箱。如果是 ``None`` ,则系统不会使用周期性边界系统。默认值: ``None`` 。
+
+ .. py:method:: build_system()
+
+ 通过残基构建系统。
+
+ .. py:method:: calc_colvar(colvar)
+
+ 计算系统中特定的集体变量的值。
+
+ 参数:
+ - **colvar** (Colvar) - 一般的集体变量 :math:`s(R)` 的基类。
+
+ 返回:
+ Tensor,集体变量 :math:`s(R)` 的值。
+
+ .. py:method:: calc_image(shift=0)
+
+ 计算坐标图。
+
+ 参数:
+ - **shift** (float) - 相对于箱子尺寸 :math:`\vec{L}` 的偏移比 :math:`c` 。默认值: ``0`` 。
+
+ 返回:
+ Tensor,坐标图。
+
+ .. py:method:: convert_length_from(unit)
+
+ 从指定的单位转换长度。
+
+ 参数:
+ - **unit** (Union[str, Units, Length, float, int]) - 长度单位。
+
+ 返回:
+ float,从指定单位转换所得长度。
+
+ .. py:method:: convert_length_to(unit)
+
+ 把长度转换到指定的单位。
+
+ 参数:
+ - **unit** (Union[str, Units, Length, float, int]) - 长度单位。
+
+ 返回:
+ float,根据特定单位换算所得长度。
+
+ .. py:method:: coordinate_in_pbc(shift=0)
+
+ 获取在整个周期性边界条件箱中的坐标。
+
+ 参数:
+ - **shift** (float) - 相对于箱子尺寸的偏移比。默认值: ``0`` 。
+
+ 返回:
+ Tensor,周期性边界条件箱中的坐标。shape为 `(B, ..., D)` ,数据类型为float。
+
+ .. py:method:: shape()
+
+ 原子坐标的shape。
+
+ 返回:
+ Tensor,原子坐标的shape。
+
+ .. py:method:: ndim()
+
+ 原子坐标的维度数量。
+
+ 返回:
+ int,原子坐标的维度的数量。
+
+ .. py:method:: length_unit()
+
+ 长度单位。
+
+ 返回:
+ str,长度单位。
+
+ .. py:method:: heavy_atom_mask()
+
+ 重原子(非氢原子)的掩码。
+
+ 返回:
+ Tensor,重原子的掩码。
+
+ .. py:method:: move(shift=None)
+
+ 移动系统的坐标。
+
+ 参数:
+ - **shift** (Tensor) - 系统的移动距离。默认值: ``None`` 。
+
+ .. py:method:: copy(shift=None)
+
+ 返回一个复制当前 `Molecule` 参数的 `Molecule` 类。
+
+ 参数:
+ - **shift** (Tensor) - 系统的移动距离。默认值: ``None`` 。
+
+ 返回:
+ class,复制了当前 `Molecule` 类的参数的 `Molecule` 类。
+
+ .. py:method:: reduplicate(shift)
+
+ 复制系统让其扩大到原来的两倍。
+
+ 参数:
+ - **shift** (Tensor) - 从原始系统移动的距离。
+
+ .. py:method:: set_bond_length(bond_length)
+
+ 设置键长。
+
+ 参数:
+ - **bond_length** (Tensor) - 设置系统的键长。
+
+ .. py:method:: residue_index(res_id)
+
+ 获得残基的索引。
+
+ 参数:
+ - **res_id** (int) - 残基ID参数。
+
+ 返回:
+ Tensor。残基在系统中的索引。
+
+ .. py:method:: residue_bond(res_id)
+
+ 获得残基键的索引。
+
+ 参数:
+ - **res_id** (int) - 残基ID参数。
+
+ 返回:
+ Tensor。残基键的索引。
+
+ .. py:method:: residue_head(res_id)
+
+ 获取残基的头索引。
+
+ 参数:
+ - **res_id** (int) - 残基ID参数。
+
+ 返回:
+ Tensor。残基的头索引。
+
+ .. py:method:: residue_tail(res_id)
+
+ 获得残基的尾索引。
+
+ 参数:
+ - **res_id** (int) - 残基ID参数。
+
+ 返回:
+ Tensor。残基的尾索引。
+
+ .. py:method:: residue_coordinate(res_id)
+
+ 获得残基坐标。
+
+ 参数:
+ - **res_id** (int) - 残基ID参数。
+
+ 返回:
+ Tensor。系统中残基的坐标。
+
+ .. py:method:: get_volume()
+
+ 获得系统的容积。
+
+ 返回:
+ Tensor。系统的容积。如果没有使用周期性边界条件箱,容积为None。
+
+ .. py:method:: space_parameters()
+
+ 获取空间的参数(坐标和周期性边界条件箱)。
+
+ 返回:
+ list。坐标和周期性边界条件箱。如果周期性边界条件箱未使用,则只返回坐标。
+
+ .. py:method:: trainable_params(recurse=True)
+
+ 获取可训练参数。
+
+ 参数:
+ - **recurse** (bool) - 如果为True,则产生此网络层和所有子网络层的参数。否则,只产生作为此网络层直接成员的参数。默认值: ``True`` 。
+
+ 返回:
+ list,所有可训练参数的list。
+
+ .. py:method:: update_coordinate(coordinate)
+
+ 更新坐标的参数。
+
+ 参数:
+ - **coordinate** (Tensor) - 用于更新系统坐标的坐标。
+
+ 返回:
+ Tensor。更新后的系统坐标。
+
+ .. py:method:: set_coordianate(coordinate)
+
+ 设定坐标的值。
+
+ 参数:
+ - **coordianate** (Tensor) - 用于设定系统坐标的坐标。
+
+ 返回:
+ Tensor,系统的坐标。
+
+ .. py:method:: update_pbc_box(pbc_box)
+
+ 更新周期性边界条件箱。
+
+ 参数:
+ - **pbc_box** (Tensor) - 用于更新系统周期性边界条件箱的周期性边界条件箱。
+
+ 返回:
+ Tensor,更新后的周期性边界条件箱。
+
+ .. py:method:: set_pbc_grad(grad_box)
+
+ 设置是否计算周期性边界条件箱的梯度。
+
+ 参数:
+ - **grad_box** (bool) - 是否计算周期性边界条件箱的梯度。
+
+ 返回:
+ bool,是否计算周期性边界条件箱的梯度。
+
+ .. py:method:: set_pbc_box(pbc_box=None)
+
+ 设置周期性边界条件箱。
+
+ 参数:
+ - **pbc_box** (Tensor) - 设置系统的周期性边界条件箱。如果是None,系统不会使用周期性边界条件箱。默认值: ``None`` 。
+
+ 返回:
+ Tensor,系统的周期性边界条件箱。
+
+ .. py:method:: repeat_box(lattices)
+
+ 根据周期性边界条件的box的格点重复系统。
+
+ 参数:
+ - **lattices** (list) - 周期性边界条件箱的格点。
+
+ .. py:method:: update_image(image=None)
+
+ 更新坐标图。
+
+ 参数:
+ - **image** (Tensor) - 用于更新系统坐标图的坐标图。默认值: ``None`` 。
+
+ 返回:
+ bool,是否成功更新了系统坐标图。
+
+ .. py:method:: set_length_unit(unit)
+
+ 设定系统的长度单位。
+
+ 参数:
+ - **unit** (Union[str, Units, Length, float, int]) - 长度单位。
+
+ .. py:method:: get_atoms(atoms)
+
+ 从系统中获取原子。
+
+ 参数:
+ - **atoms** (Union[Tensor, Parameter, ndarray, str, list, tuple]) - 原子列表。
+
+ 返回:
+ class。原子或一些原子。
+
+ .. py:method:: get_coordinate(atoms=None)
+
+ 获取坐标的Tensor。
+
+ 参数:
+ - **atoms** (class) - 特殊原子群的基类,在MindSPONGE中被用作 `atoms group module` 。默认值: ``None`` 。
+
+ 返回:
+ Tensor,坐标。数据类型为float。
+
+ .. py:method:: get_pbc_box()
+
+ 获取周期性边界条件箱。
+
+ 返回:
+ Tensor。周期性边界条件箱。
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/preparing/system/mindsponge.system.Residue.rst b/MindSPONGE/docs/api/api_python/system/mindsponge.system.Residue.rst
similarity index 31%
rename from MindSPONGE/docs/api/preparing/system/mindsponge.system.Residue.rst
rename to MindSPONGE/docs/api/api_python/system/mindsponge.system.Residue.rst
index c3f6c31f7548a91a43d2eddc2283aa884b34a01a..84efd243e362be57886c274c2952bd35bb8bace8 100644
--- a/MindSPONGE/docs/api/preparing/system/mindsponge.system.Residue.rst
+++ b/MindSPONGE/docs/api/api_python/system/mindsponge.system.Residue.rst
@@ -1,17 +1,20 @@
mindsponge.system.Residue
=========================
-.. py:class:: mindsponge.system.Residue(atom_name=None, atom_type=None, atom_mass=None, atom_charge=None, atomic_number=None, bond=None, head_atom=None, tail_atom=None, start_index=0, name='MOL', template=None)
+.. py:class:: mindsponge.system.Residue(atom_name: Union[List[str], ndarray] = None, atom_type: Union[List[str], ndarray] = None, atom_mass: Union[Tensor, ndarray, List[float]] = None, atom_charge: Union[Tensor, ndarray, List[float]] = None, atomic_number: Union[Tensor, ndarray, List[float]] = None, bond: Union[Tensor, ndarray, List[int]] = None, head_atom: int = None, tail_atom: int = None, start_index: int = 0, name: str = 'MOL', template: Union[dict, str] = None)
- 小分子中残基的类。
+ 残基的基类。 `Residue` 模块是 `Molecule` (system) 的组成部分。 `Residue` 不仅可以代表单一的氨基酸残基,还可以代表分子系统中的一个小分子,例如一个水分子、一个无机盐离子等,这与PDB文件中 "residue" 的概念相似。
+
+ .. Note::
+ `Residue` 只用来表示原子属性和键连接关系,不包含原子坐标。
参数:
- - **atom_name** (list) - 原子名称。默认值:"None"。
- - **atom_type** (list) - 原子种类。默认值:"None"。
- - **atom_mass** (Tensor) - 原子质量。默认值:"None"。
- - **atom_charge** (Tensor) - 原子电荷。默认值:"None"。
- - **atomic_number** (Tensor) - 原子序数。默认值:"None"。
- - **bond** (Tensor) - 边序号。默认值:"None"。
+ - **atom_name** (Union[List[str], ndarray]) - 原子名称的array,数据类型为str。默认值:"None"。
+ - **atom_type** (Union[List[str], ndarray]) - 原子种类的array,数据类型为str。默认值:"None"。
+ - **atom_mass** (Union[Tensor, ndarray, List[float]]) - 原子质量的array,shape为 `(B, A)` ,数据类型为float。默认值:"None"。
+ - **atom_charge** (Union[Tensor, ndarray, List[float]]) - 原子电荷的array,shape为 `(B, A)` ,数据类型为float。默认值:"None"。
+ - **atomic_number** (Union[Tensor, ndarray, List[float]]) - 原子序数的array,shape为 `(B, A)` ,数据类型为float。默认值:"None"。
+ - **bond** (Union[Tensor, ndarray, List[int]]) - 键连接的array,shape为 `(B, b, 2)` ,数据类型为int。默认值:"None"。
- **head_atom** (int) - 与前一个残基相连接的头原子的索引。默认值:"None"。
- **tail_atom** (int) - 与下一个残基相连的尾原子的索引。默认值:"None"。
- **start_index** (int) - 残基中第一个原子的开始索引。默认值:0。
@@ -21,60 +24,67 @@ mindsponge.system.Residue
符号:
- **B** - Batch size。
- **A** - 原子总数。
- - **b** - 边总数。
+ - **b** - 键总数。
- .. py:method:: add_atom(atom_name=None, atom_type=None, atom_mass=None, atom_charge=None, atomic_number=None)
+ .. py:method:: name()
- 设定原子。
+ 获取残基的名称。
+
+ 返回:
+ str,残基名称。
+
+ .. py:method:: build_atom_mass(template)
+
+ 按照模板中原子名称对应的原子索引,获取模板对应索引的原子质量并加到残基对应原子上。
参数:
- - **atom_name** (Union[numpy.ndarray, list(str)]) - 原子名称。默认值:"None"。
- - **atom_type** (Union[numpy.ndarray, list(str)]) - 原子种类。默认值:"None"。
- - **atom_mass** (Tensor) - 原子质量。默认值:"None"。
- - **atom_charge** (Tensor) - 原子电荷数。默认值:"None"。
- - **atomic_number** (Tensor) - 原子序数。默认值:"None"。
+ - **template** (dict) - 残基的模板。
- .. py:method:: broadcast_multiplicity(multi_system)
+ .. py:method:: build_atomic_number(template)
- 将信息广播到所选择的多系统中。
+        按照模板中原子名称对应的原子索引,获取模板对应索引的原子序数并加到残基对应原子上。
参数:
- - **multi_system** (int) - 多系统中系统的数量。
+ - **template** (dict) - 残基的模板。
- .. py:method:: build_atom_charge(template)
+ .. py:method:: build_atom_type(template)
- 把原子电荷数附到原子的索引中。
+ 按照模板中原子名称对应的原子索引,获取模板对应索引的原子种类并加到残基对应原子上。
参数:
- - **template** (Union[dict, str]) - 残基的模板。
+ - **template** (dict) - 残基的模板。
- .. py:method:: build_atom_mass(template)
+ .. py:method:: build_atom_charge(template)
- 把原子的质量附到原子的索引中。
+ 按照模板中原子名称对应的原子索引,获取模板对应索引的原子电荷数并加到残基对应原子上。
参数:
- - **template** (Union[dict, str]) - 残基的模板。
+ - **template** (dict) - 残基的模板。
- .. py:method:: build_atom_type(template)
+ .. py:method:: build_bond(template)
- 把原子种类附到原子的索引中。
+ 按照模板中原子名称对应的原子索引,获取模板对应索引的原子的化学键并加到残基对应原子上。
参数:
- **template** (Union[dict, str]) - 残基的模板。
- .. py:method:: build_atomic_number(template)
+ .. py:method:: add_atom(atom_name=None, atom_type=None, atom_mass=None, atom_charge=None, atomic_number=None)
- 把原子数附到原子的索引中。
+ 把一个原子添加到残基中。
参数:
- - **template** (Union[dict, str]) - 残基的模板。
+ - **atom_name** (str) - 原子名称。默认值:"None"。
+ - **atom_type** (str) - 原子种类。默认值:"None"。
+ - **atom_mass** (float) - 原子质量。默认值:"None"。
+ - **atom_charge** (float) - 原子电荷数。默认值:"None"。
+            - **atomic_number** (int) - 原子序数。默认值:"None"。
- .. py:method:: build_bond(template)
+ .. py:method:: broadcast_multiplicity(multi_system)
- 把原子的边附到原子的索引中。
+ 将信息广播到所选择的多系统中。
参数:
- - **template** (Union[dict, str]) - 残基的模板。
+ - **multi_system** (int) - 多系统中系统的数量。
.. py:method:: set_name(name)
@@ -85,7 +95,7 @@ mindsponge.system.Residue
.. py:method:: set_start_index(start_index)
- 设定残基中第一个原子的开始索引。
+ 设定残基的开始索引。
参数:
- - **start_index** (int) - 残基中第一个原子的开始索引。
\ No newline at end of file
+ - **start_index** (int) - 残基的开始索引。
\ No newline at end of file
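A small construction sketch for the `Residue` signature shown at the top of this file. The atom names, types, masses and bond indices below are placeholder values for a water-like residue and are not taken from a real template:

    from mindsponge.system import Residue

    # Illustrative values only; a real residue would normally come from a template.
    res = Residue(atom_name=['O', 'H1', 'H2'],
                  atom_type=['OW', 'HW', 'HW'],
                  atom_mass=[15.999, 1.008, 1.008],
                  atomic_number=[8, 1, 1],
                  bond=[[[0, 1], [0, 2]]],   # bond array of shape (B, b, 2)
                  name='WAT')

    res.set_start_index(0)   # index of the first atom within the parent system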
diff --git a/MindSPONGE/docs/api/api_python_en/mindsponge.control.rst b/MindSPONGE/docs/api/api_python_en/mindsponge.control.rst
new file mode 100644
index 0000000000000000000000000000000000000000..75f1f6a32781bddd01b296897daa53fb1cc00721
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python_en/mindsponge.control.rst
@@ -0,0 +1,9 @@
+mindsponge.control
+==================
+
+.. msplatformautosummary::
+ :toctree: control
+ :nosignatures:
+ :template: classtemplate.rst
+
+ mindsponge.control.Controller
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python_en/mindsponge.core.rst b/MindSPONGE/docs/api/api_python_en/mindsponge.core.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3470a7de11e5e6f361da1125ad05a99f9436b48d
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python_en/mindsponge.core.rst
@@ -0,0 +1,10 @@
+mindsponge.core
+===============
+
+.. msplatformautosummary::
+ :toctree: core
+ :nosignatures:
+ :template: classtemplate.rst
+
+ mindsponge.core.RunOneStepCell
+ mindsponge.core.WithEnergyCell
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python_en/mindsponge.optimizer.rst b/MindSPONGE/docs/api/api_python_en/mindsponge.optimizer.rst
new file mode 100644
index 0000000000000000000000000000000000000000..18309199ee02dc4c6bbfa6774d871c968f131b0d
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python_en/mindsponge.optimizer.rst
@@ -0,0 +1,9 @@
+mindsponge.optimizer
+====================
+
+.. msplatformautosummary::
+ :toctree: optimizer
+ :nosignatures:
+ :template: classtemplate.rst
+
+ mindsponge.optimizer.Updater
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python_en/mindsponge.potential.rst b/MindSPONGE/docs/api/api_python_en/mindsponge.potential.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f87c8fa7e43d3c941e535c8aec70de85db4642b2
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python_en/mindsponge.potential.rst
@@ -0,0 +1,10 @@
+mindsponge.potential
+====================
+
+.. msplatformautosummary::
+ :toctree: potential
+ :nosignatures:
+ :template: classtemplate.rst
+
+ mindsponge.potential.EnergyCell
+    mindsponge.potential.PotentialCell
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/api_python_en/mindsponge.system.rst b/MindSPONGE/docs/api/api_python_en/mindsponge.system.rst
new file mode 100644
index 0000000000000000000000000000000000000000..40fc985bbae2225793329441dccc156795c11f91
--- /dev/null
+++ b/MindSPONGE/docs/api/api_python_en/mindsponge.system.rst
@@ -0,0 +1,10 @@
+mindsponge.system
+=================
+
+.. msplatformautosummary::
+ :toctree: system
+ :nosignatures:
+ :template: classtemplate.rst
+
+ mindsponge.system.Molecule
+ mindsponge.system.Residue
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/preparing/control/mindsponge.control.Controller.rst b/MindSPONGE/docs/api/preparing/control/mindsponge.control.Controller.rst
deleted file mode 100644
index 9196435b869038375ad68294bf2f15bb30dbcb2c..0000000000000000000000000000000000000000
--- a/MindSPONGE/docs/api/preparing/control/mindsponge.control.Controller.rst
+++ /dev/null
@@ -1,119 +0,0 @@
-mindsponge.control.Controller
-=============================
-
-.. py:class:: mindsponge.control.Controller(system, control_step=1)
-
- 控制器用于控制仿真过程中的参数,包括积分器、恒温器、气压调节器、约束器等。
-
- 参数:
- - **system** (Molecule) - 模拟系统。
- - **control_step** (int) - 控制器执行的步骤间隔。默认值:1。
-
- .. py:method:: get_com(coordinate)
-
- 计算质心坐标。
-
- 参数:
- - **coordinate** (Tensor) - 坐标。
-
- 返回:
- Tensor。质心坐标。
-
- .. py:method:: get_com_velocity(velocity)
-
- 计算质心速度。
-
- 参数:
- - **velocity** (Tensor) - 速度。
-
- 返回:
- Tensor。质心速度。
-
- .. py:method:: get_kinetics(velocity)
-
- 根据速度计算动力学。
-
- 参数:
- - **velocity** (Tensor) - 速度。
-
- 返回:
- Tensor。根据速度获得的动力学。
-
- .. py:method:: get_pressure(kinetics, virial, pbc_box)
-
- 根据动力学,维里,周期性边界条件box计算压力。
-
- 参数:
- - **kinetics** (Tensor) - 动力学。
- - **virial** (Tensor) - 维里。
- - **pbc_box** (Tensor) - 周期性边界条件box。
-
- 返回:
- Tensor。根据动力学,维里,周期性边界条件box计算压力。
-
- .. py:method:: get_temperature(kinetics)
-
- 根据速度计算温度。
-
- 参数:
- - **kinetics** (Tensor) - 动力学。
-
- 返回:
- Tensor。温度。
-
- .. py:method:: get_virial(pbc_grad, pbc_box)
-
- 根据周期性边界条件和梯度计算维里。
-
- 参数:
- - **pbc_grad** (Tensor) - 周期性边界条件box的梯度。
- - **pbc_box** (Tensor) - 周期性边界条件box
-
- 返回:
- Tensor。维里。
-
- .. py:method:: get_volume(pbc_box)
-
- 根据周期性边界条件box计算容积。
-
- 参数:
- - **pbc_box** (Tensor) - 用于计算容积的周期性边界条件。
-
- 返回:
- Tensor。容积。
-
- .. py:method:: set_time_step(dt)
-
- 设置模拟单步时间。
-
- 参数:
- - **dt** (float) - 单步时间所需时间。
-
- .. py:method:: set_degrees_of_freedom(dofs)
-
- 设置自由度。
-
- 参数:
- - **dofs** (int) - 自由度。
-
- .. py:method:: update_coordinate(coordinate, success=True)
-
- 更新坐标的参数。
-
- 参数:
- - **coordinate** (Tensor) - 原子的位置坐标。
- - **success** (bool, 可选) - 判断是否成功的参数。默认值:True。
-
- 返回:
- bool。是否更新了坐标的参数。
-
- .. py:method:: update_pbc_box(pbc_box, success)
-
- 更新周期性边界条件box。
-
- 参数:
- - **pbc_box** (Tensor) - 周期性边界条件box。
- - **success** (bool, 可选) - 判断是否成功的参数。
-
- 返回:
- bool。是否更新了周期性边界条件box。
\ No newline at end of file
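For reference, the centre-of-mass and temperature helpers documented in the removed page implement the textbook relations. A plain NumPy sketch of those relations is given below; the exact unit and sign conventions used inside MindSPONGE may differ, so treat it only as a reminder of the physics:

    import numpy as np

    k_b = 0.00831446261815324                    # Boltzmann constant in kJ/(mol*K)
    mass = np.array([15.999, 1.008, 1.008])      # atomic masses, shape (A,)
    velocity = 0.1 * np.random.randn(3, 3)       # atomic velocities, shape (A, D)

    # Centre-of-mass velocity: mass-weighted average of the atomic velocities.
    v_com = (mass[:, None] * velocity).sum(axis=0) / mass.sum()

    # Kinetic energy and the temperature it corresponds to for n_dof degrees of freedom.
    e_kin = 0.5 * (mass[:, None] * velocity ** 2).sum()
    n_dof = 3 * len(mass)
    temperature = 2.0 * e_kin / (n_dof * k_b)
    print(v_com, temperature)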
diff --git a/MindSPONGE/docs/api/preparing/core/mindsponge.core.RunOneStepCell.rst b/MindSPONGE/docs/api/preparing/core/mindsponge.core.RunOneStepCell.rst
deleted file mode 100644
index 099cb2d74f703e7a1095df70ceff36ce60f4819a..0000000000000000000000000000000000000000
--- a/MindSPONGE/docs/api/preparing/core/mindsponge.core.RunOneStepCell.rst
+++ /dev/null
@@ -1,42 +0,0 @@
-mindsponge.core.RunOneStepCell
-==============================
-
-.. py:class:: mindsponge.core.RunOneStepCell(network: SimulationCell, optimizer: Optimizer, steps: int = 1, sens: float = 1.0)
-
- 运行一步模拟的核心层。
-
- 参数:
- - **network** (SimulationCell) - 模拟系统的网络。
- - **optimizer** (Optimizer) - 模拟优化器。
- - **steps** (int) - JIT的步数。默认值:1。
- - **sens** (float) - 作为反向传播的输入要填充的缩放数。默认值:1.0。
-
- .. py:method:: get_energy_and_force(*inputs)
-
- 获取系统的能量和力。
-
- 返回:
- - Tensor。能量。
- - Tensor。力。
-
- .. py:method:: run_one_step(*inputs)
-
- 运行单步模拟。
-
- 返回:
- - Tensor。模拟层结果输出的能量的大小。
- - Tensor。模拟层结果输出的力的大小。
-
- .. py:method:: set_pbc_grad(value: bool)
-
- 设定是否计算PBC box的梯度。
-
- 参数:
- - **value** (bool) - 判断是否计算PBC box的梯度。
-
- .. py:method:: set_steps(steps: int)
-
- 设置JIT的步数。
-
- 参数:
- - **steps** (int) - JIT的步数。
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/preparing/optimizer/mindsponge.optimizer.Updater.rst b/MindSPONGE/docs/api/preparing/optimizer/mindsponge.optimizer.Updater.rst
deleted file mode 100644
index 9b5e63b5cc58813832849fb8c9e37c95ecd79cd3..0000000000000000000000000000000000000000
--- a/MindSPONGE/docs/api/preparing/optimizer/mindsponge.optimizer.Updater.rst
+++ /dev/null
@@ -1,170 +0,0 @@
-mindsponge.optimizer.Updater
-============================
-
-.. py:class:: mindsponge.optimizer.Updater(system, controller=None, time_step=1e-3, velocity=None, weight_decay=0.0, loss_scale=1.0)
-
- 更新空间参数(坐标和周期性边界条件box)的优化器。
-
- 参数:
- - **system** (Molecule) - 模拟系统。
- - **controller** (Controller) - 控制器。默认值:"None"。
- - **time_step** (float) - 单步时间。默认值:1e-3。
- - **velocity** (Tensor) - 速度,shape为(B, A, D)。默认值:"None"。
- - **weight_decay** (float) - 权重衰减值。默认值:0.0。
- - **loss_scale** (float) - 误差比例。默认值:1.0。
-
- 符号:
- - **B** - Batch size。
- - **A** - 原子总数。
- - **D** - 模拟系统的维度,一般为3。
-
- .. py:method:: get_dt()
-
- 获取学习率。
-
- 返回:
- float。当前step的学习率。
-
- .. py:method:: get_kinetics(velocity)
-
- 获取动力学。
-
- 参数:
- - **velocity** (Tensor) - 速度。
-
- 返回:
- Tensor。动力学。
-
- .. py:method:: get_pressure(kinetics, virial, pbc_box)
-
- 获得压力。
-
- 参数:
- - **kinetics** (Tensor) - 动力学。
- - **virial** (Tensor) - 维里。
- - **pbc_box** (Tensor) - 周期性边界条件box。
-
- 返回:
- Tensor。压力。
-
- .. py:method:: get_temperature(kinetics=None)
-
- 获取温度。
-
- 参数:
- - **kinetics** (Tensor) - 动力学。默认值:"None"。
-
- 返回:
- Tensor。温度。
-
- .. py:method:: get_velocity()
-
- 获取速度。
-
- 返回:
- Tensor。速度值。
-
- .. py:method:: get_virial(pbc_grad, pbc_box)
-
- 获取维里。
-
- 参数:
- - **pbc_grad** (Tensor) - 周期性边界条件box的梯度。
- - **pbc_box** (Tensor) - 周期性边界条件box。
-
- 返回:
- Tensor。维里。
-
- .. py:method:: next_step(success=True)
-
- 完成当前优化step并且进行到下一个step。
-
- 参数:
- - **success** (bool) - 是否移动到下一步。
-
- 返回:
- bool。
-
- .. py:method:: set_step(step=0)
-
- 设置步数。
-
- 参数:
- - **step** (int) - 步数。默认值:0。
-
- .. py:method:: update_coordinate(coordinate, success=True)
-
- 更新坐标的参数。
-
- 参数:
- - **coordinate** (Tensor) - 原子的位置坐标。
- - **success** (bool, 可选) - 判断是否成功的参数。默认值:"True"。
-
- 返回:
- bool。是否更新了坐标的参数。
-
- .. py:method:: update_kinetics(kinetics, success=True)
-
- 更新动力学参数。
-
- 参数:
- - **kinetics** (Tensor) - 动力学。
- - **success** (bool, 可选) - 判断是否成功的参数。默认值:"True"。
-
- 返回:
- bool。是否更新了动力学参数。
-
- .. py:method:: update_pbc_box(pbc_box, success=True)
-
- 更新周期性边界条件box。
-
- 参数:
- - **pbc_box** (Tensor) - 周期性边界条件box。
- - **success** (bool, 可选) - 判断是否成功的参数。默认值:"True"。
-
- 返回:
- bool。是否更新了周期性边界条件box。
-
- .. py:method:: update_pressure(pressure, success=True)
-
- 更新压力参数。
-
- 参数:
- - **pressure** (Tensor) - 压力。
- - **success** (bool, 可选) - 判断是否成功的参数。默认值:"True"。
-
- 返回:
- bool。是否更新了压力参数。
-
- .. py:method:: update_temperature(temperature, success=True)
-
- 更新温度参数。
-
- 参数:
- - **temperature** (Tensor) - 温度。
- - **success** (bool, 可选) - 判断是否成功的参数。默认值:"True"。
-
- 返回:
- bool。是否更新了温度参数。
-
- .. py:method:: update_velocity(velocity, success=True)
-
- 更新速度参数。
-
- 参数:
- - **velocity** (Tensor) - 速度。
- - **success** (bool, 可选) - 判断是否成功的参数。默认值:"True"。
-
- 返回:
- bool。是否更新了速度参数。
-
- .. py:method:: update_virial(virial, success=True)
-
- 更新维里参数。
-
- 参数:
- - **virial** (Tensor) - 维里。
- - **success** (bool, 可选) - 判断是否成功的参数。默认值:"True"。
-
- 返回:
- bool。是否更新了维里参数。
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/preparing/potential/mindsponge.potential.EnergyCell.rst b/MindSPONGE/docs/api/preparing/potential/mindsponge.potential.EnergyCell.rst
deleted file mode 100644
index 7b04f5fca4241d3b30cfa2c4c444451c8f75cde2..0000000000000000000000000000000000000000
--- a/MindSPONGE/docs/api/preparing/potential/mindsponge.potential.EnergyCell.rst
+++ /dev/null
@@ -1,58 +0,0 @@
-mindsponge.potential.EnergyCell
-===============================
-
-.. py:class:: mindsponge.potential.EnergyCell(label, output_dim=1, length_unit="nm", energy_unit="kj/mol", units=None, use_pbc=None)
-
- 能量项的基础层。
-
- 参数:
- - **label** (str) - 能量的标签名称。
- - **output_dim** (int) - 输出维度。默认值:1。
- - **length_unit** (str) - 位置坐标的长度单位。默认值:"nm"。
- - **energy_unit** (str) - 能量单位。默认值:"kj/mol"。
- - **units** (Units) - 长度和能量单位。默认值:"None"。
- - **use_pbc** (bool) - 是否使用PBC。默认值:"None"。
-
- 输出:
- Tensor。能量,shape为(B, 1),数据类型为float。
-
- .. py:method:: convert_energy_from(unit)
-
- 将能量从外部单元转换到内部单元。
-
- 参数:
- - **unit** (str) - 长度和能量的单位。
-
- 返回:
- float。从外部单元转换到内部单元的能量。
-
- .. py:method:: convert_energy_to(unit)
-
- 将能量从内部单元转换到外部单元。
-
- 参数:
- - **unit** (str) - 长度和能量的单位。
-
- 返回:
- float。从内部单元转换到外部单元的能量。
-
- .. py:method:: set_cutoff(cutoff)
-
- 设置中断距离。
-
- 参数:
- - **cutoff** (float) - 中断距离。
-
- .. py:method:: set_input_unit(units)
-
- 设置输入坐标的长度单位。
-
- 参数:
- - **units** (Units) - 长度和能量的单位。
-
- .. py:method:: set_pbc(use_pbc)
-
- 设置是否使用PBC。
-
- 参数:
- - **use_pbc** (bool, 可选) - 是否使用PBC。
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/preparing/potential/mindsponge.potential.PotentialCell.rst b/MindSPONGE/docs/api/preparing/potential/mindsponge.potential.PotentialCell.rst
deleted file mode 100644
index b86ad679f597e6f4d2e3eada9858d72f0732f81a..0000000000000000000000000000000000000000
--- a/MindSPONGE/docs/api/preparing/potential/mindsponge.potential.PotentialCell.rst
+++ /dev/null
@@ -1,45 +0,0 @@
-mindsponge.potential.PotentialCell
-==================================
-
-.. py:class:: mindsponge.potential.PotentialCell(cutoff=None, exclude_index=None, length_unit=None, energy_unit=None, units=None, use_pbc=None)
-
- 势能的基础单元。
-
- 参数:
- - **cutoff** (float) - 中断距离。默认值:"None"。
- - **exclude_index** (Tensor) - 应从无键相互作用中被排除的原子索引,shape为(B, A, Ex),数据类型为int。默认值:"None"。
- - **length_unit** (str) - 位置坐标的长度单位。默认值:"None"。
- - **energy_unit** (str) - 能量单位。默认值:"None"。
- - **units** (Units) - 长度和能量单位。默认值:"None"。
- - **use_pbc** (bool, 可选) - 是否使用周期性边界条件。如果为None,则不使用周期性边界条件。默认值:"None"。
-
- 输出:
- Tensor。势,shape为(B, 1)。数据类型为float。
-
- .. py:method:: exclude_index()
-
- 排除索引。
-
- 返回:
- Tensor。排除索引。
-
- .. py:method:: set_cutoff(cutoff)
-
- 设置中断距离。
-
- 参数:
- - **cutoff** (Tensor) - 中断距离。
-
- .. py:method:: set_exclude_index(exclude_index)
-
- 设置排除索引。
-
- 参数:
- - **exclude_index** (Tensor) - 应该从非键相互作用中被排除的原子的索引。
-
- .. py:method:: set_pbc(use_pbc)
-
- 设置是否使用周期性边界条件PBC。
-
- 参数:
- - **use_pbc** (bool, 可选) - 是否使用PBC。
\ No newline at end of file
diff --git a/MindSPONGE/docs/api/preparing/system/mindsponge.system.Molecule.rst b/MindSPONGE/docs/api/preparing/system/mindsponge.system.Molecule.rst
deleted file mode 100644
index 2c60ac3cc0151265df8486bb2605527e8688cae6..0000000000000000000000000000000000000000
--- a/MindSPONGE/docs/api/preparing/system/mindsponge.system.Molecule.rst
+++ /dev/null
@@ -1,265 +0,0 @@
-mindsponge.system.Molecule
-==========================
-
-.. py:class:: mindsponge.system.Molecule(atoms=None, atom_name=None, atom_type=None, atom_mass=None, atom_charge=None, atomic_number=None, bond=None, coordinate=None, pbc_box=None, template=None, residue=None, length_unit=None)
-
- 分子体系层。
-
- 参数:
- - **atoms** (list) - 体系中的原子。默认值:"None"。
- - **atom_name** (list) - 原子名称。默认值:"None"。
- - **atom_type** (list) - 原子种类。默认值:"None"。
- - **atom_mass** (Tensor) - 原子质量,shape为(B, A)。默认值:"None"。
- - **atom_charge** (Tensor) - 原子电荷数,shape为(B, A)。默认值:"None"。
- - **atomic_number** (Tensor) - 原子序数,shape为(B, A)。默认值:"None"。
- - **bond** (Tensor) - 边的索引,shape为(B, b, 2)或者(1, b, 2)。默认值:"None"。
- - **coordinate** (Tensor) - 原子位置坐标,shape为(B, A, D)或者(1, A, D)。默认值:"None"。
- - **pbc_box** (Tensor) - 周期性边界条件的box,shape为(B, D)或者(1, D)。默认值:"None"。
- - **template** (Union[dict, str]) - 残基的模板。默认值:"None"。
- - **residue** (Union[dict, str]) - 残基系数。默认值:"None"。
- - **length_unit** (str) - 位置坐标的长度单位。默认值:"None"。
-
- 符号:
- - **B** - Batch size。
- - **A** - 原子数量。
- - **b** - 边数量。
- - **D** - 模拟体系的维度,一般为3。
-
- .. py:method:: add_residue(residue, coordinate=None)
-
- 增加残基。
-
- 参数:
- - **residue** (Union[Residue, list]) - 残基参数。
- - **coordinate** (Tensor) - 原子的位置坐标,shape为(B, A, D)或者(1, A, D)。默认值:"None"。
-
- .. py:method:: append(system)
-
- 添加系统。
-
- 参数:
- - **system** (Molecule) - 系统参数。
-
- .. py:method:: build_atom_charge()
-
- 构建原子电荷数。
-
- .. py:method:: build_atom_type()
-
- 构建原子种类。
-
- .. py:method:: build_space(coordinate, pbc_box=None)
-
- 构建坐标系和周期性边界条件box。
-
- 参数:
- - **coordinate** (Tensor) - 原子的位置坐标。
- - **pbc_box** (Tensor) - 周期性边界条件box。默认值:"None"。
-
- .. py:method:: build_system()
-
- 通过残基构建系统。
-
- .. py:method:: calc_image(shift=0.0)
-
- 计算坐标图。
-
- 参数:
- - **shift** (float) - 转换参数。默认值:0.0。
-
- 返回:
- Tensor。坐标图。
-
- .. py:method:: coordinate_in_box(shift=0)
-
- 获取整个周期性边界条件box中的坐标。
-
- 参数:
- - **shift** (float) - 转换参数。默认值:0.0。
-
- 返回:
- Tensor。整个周期性边界条件box中的坐标。
-
- .. py:method:: copy(shift=None)
-
- 返回一个复制当前分子参数的分子。
-
- 参数:
- - **shift** (Tensor) - 转换参数。默认值:"None"。
-
- .. py:method:: get_coordinate()
-
- 获取坐标的Tensor。
-
- 返回:
- Tensor。坐标的Tensor。
-
- .. py:method:: get_pbc_box()
-
- 获取周期性边界条件box。
-
- 返回:
- Tensor。周期性边界条件box。
-
- .. py:method:: get_volume()
-
- 获得系统的容积。
-
- 返回:
- Tensor。系统的容积。
-
- .. py:method:: move(shift=None)
-
- 移动系统的坐标。
-
- 参数:
- - **shift** (Tensor) - 转换参数。默认值:"None"。
-
- .. py:method:: reduplicate(shift)
-
- 复制系统让其扩大到原来的两倍。
-
- 参数:
- - **shift** (Tensor) - 转换参数。
-
- .. py:method:: repeat_box(lattices)
-
- 根据周期性边界条件的box的格点重复系统。
-
- 参数:
- - **lattices** (list) - 格点参数。
-
- .. py:method:: residue_bond(res_id)
-
- 获得残基的边的索引。
-
- 参数:
- - **res_id** (int) - 残基ID参数。
-
- 返回:
- Tensor。残基的边的索引。
-
- .. py:method:: residue_coordinate(res_id)
-
- 获得残基坐标。
-
- 参数:
- - **res_id** (int) - 残基ID参数。
-
- 返回:
- Tensor。残基的边的索引。
-
- .. py:method:: residue_head(res_id)
-
- 获取残基的头索引。
-
- 参数:
- - **res_id** (int) - 残基ID参数。
-
- 返回:
- Tensor。残基的头索引。
-
- .. py:method:: residue_index(res_id)
-
- 获得残基索引。
-
- 参数:
- - **res_id** (int) - 残基ID参数。
-
- 返回:
- Tensor。残基的索引。
-
- .. py:method:: residue_tail(res_id)
-
- 获得残基的尾索引。
-
- 参数:
- - **res_id** (int) - 残基ID参数。
-
- 返回:
- Tensor。残基的尾索引。
-
- .. py:method:: set_bond_length(bond_length)
-
- 设置边的长度。
-
- 参数:
- - **bond_length** (Tensor) - 边的长度。
-
- .. py:method:: set_coordianate(coordinate)
-
- 设定坐标的值。
-
- 参数:
- - **coordianate** (Tensor) - 原子的位置坐标。
-
- .. py:method:: set_length_unit(unit)
-
- 设定系统的长度单位。
-
- 参数:
- - **unit** (Units) - 长度单位。
-
- .. py:method:: set_pbc_box(pbc_box=None)
-
- 设置周期性边界条件box。
-
- 参数:
- - **pbc_box** (Tensor) - 周期性边界条件box。默认值:"None"。
-
- .. py:method:: set_pbc_grad(grad_box)
-
- 设置是否计算周期性边界条件box的梯度。
-
- 参数:
- - **grad_box** (bool) - 是否计算周期性边界条件box的梯度。
-
- .. py:method:: space_parameters()
-
- 获取空间的参数(坐标和周期性边界条件box)。
-
- 返回:
- list。空间参数的list。
-
- .. py:method:: trainable_params(recurse=True)
-
- 获取可训练参数。
-
- 参数:
- - **recurse** (bool, 可选) - 递归参数。默认值:"True"。
-
- 返回:
- list。可训练参数list。
-
- .. py:method:: update_coordinate(coordinate, success=True)
-
- 更新坐标的参数。
-
- 参数:
- - **coordinate** (Tensor) - 原子的位置坐标。
- - **success** (bool, 可选) - 判断是否成功的参数。默认值:"True"。
-
- 返回:
- bool。是否更新了坐标的参数。
-
- .. py:method:: update_image(image=None, success=True)
-
- 更新坐标图。
-
- 参数:
- - **image** (Tensor) - 图参数。默认值:"None"。
- - **success** (bool, 可选) - 判断是否成功的参数。默认值:"True"。
-
- 返回:
- bool。
-
- .. py:method:: update_pbc_box(pbc_box, success=True)
-
- 更新周期性边界条件box。
-
- 参数:
- - **pbc_box** (Tensor) - 周期性边界条件box,shape为(B, D)或者(1, D)。
- - **success** (bool, 可选) - 判断是否成功的参数。默认值:"True"。
-
- 返回:
- bool。是否更新了周期性边界条件box。
\ No newline at end of file
diff --git "a/MindSPONGE/docs/cooperative_partner/\345\214\227\344\272\254\345\244\247\345\255\246.png" "b/MindSPONGE/docs/cooperative_partner/\345\214\227\344\272\254\345\244\247\345\255\246.png"
index d65972660d3de557f36d6d71ff9123ed204eb33c..8307c6e33000e755db570a37ad024e5cb2cf4989 100644
Binary files "a/MindSPONGE/docs/cooperative_partner/\345\214\227\344\272\254\345\244\247\345\255\246.png" and "b/MindSPONGE/docs/cooperative_partner/\345\214\227\344\272\254\345\244\247\345\255\246.png" differ
diff --git "a/MindSPONGE/docs/cooperative_partner/\346\267\261\345\234\263\346\271\276.jpg" "b/MindSPONGE/docs/cooperative_partner/\346\267\261\345\234\263\346\271\276.jpg"
index 32db893d8e5243b84845d1faff7669b765175ef0..1d533df39c967d433f2c649f85422e1c8c4814c7 100644
Binary files "a/MindSPONGE/docs/cooperative_partner/\346\267\261\345\234\263\346\271\276.jpg" and "b/MindSPONGE/docs/cooperative_partner/\346\267\261\345\234\263\346\271\276.jpg" differ
diff --git "a/MindSPONGE/docs/cooperative_partner/\350\245\277\347\224\265.png" "b/MindSPONGE/docs/cooperative_partner/\350\245\277\347\224\265.png"
index 91808694136d438aaea3952519696638c7af4614..02d349471ad308575f1351ee1e66d5fa7788d1d2 100644
Binary files "a/MindSPONGE/docs/cooperative_partner/\350\245\277\347\224\265.png" and "b/MindSPONGE/docs/cooperative_partner/\350\245\277\347\224\265.png" differ
diff --git a/MindSPONGE/docs/modelcards/ColabDesign.png b/MindSPONGE/docs/modelcards/ColabDesign.png
new file mode 100644
index 0000000000000000000000000000000000000000..c31d9cb1b20f05763f80366767d28db5e0f1b8c3
Binary files /dev/null and b/MindSPONGE/docs/modelcards/ColabDesign.png differ
diff --git a/MindSPONGE/docs/modelcards/DeepFRI.PNG b/MindSPONGE/docs/modelcards/DeepFRI.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..dcd156066335d7bb8be468f4db1d1169cc70be21
Binary files /dev/null and b/MindSPONGE/docs/modelcards/DeepFRI.PNG differ
diff --git a/MindSPONGE/docs/modelcards/GROVER.PNG b/MindSPONGE/docs/modelcards/GROVER.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..86bc9f72c9e350b281fed36f16ee8da8b70ebdcb
Binary files /dev/null and b/MindSPONGE/docs/modelcards/GROVER.PNG differ
diff --git a/MindSPONGE/docs/modelcards/GVP.PNG b/MindSPONGE/docs/modelcards/GVP.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..e43b33a7278e14aa90ec7749366e5ae3b918a320
Binary files /dev/null and b/MindSPONGE/docs/modelcards/GVP.PNG differ
diff --git a/MindSPONGE/docs/modelcards/GraphDTA.JPG b/MindSPONGE/docs/modelcards/GraphDTA.JPG
new file mode 100644
index 0000000000000000000000000000000000000000..106824dd5139193a90701fe8ef95f2c50962e79b
Binary files /dev/null and b/MindSPONGE/docs/modelcards/GraphDTA.JPG differ
diff --git a/MindSPONGE/docs/modelcards/MEGA-EvoGen.png b/MindSPONGE/docs/modelcards/MEGA-EvoGen.png
new file mode 100644
index 0000000000000000000000000000000000000000..f0eaab6c0795ba3222864cee6cafc3d01cb647d4
Binary files /dev/null and b/MindSPONGE/docs/modelcards/MEGA-EvoGen.png differ
diff --git a/MindSPONGE/docs/modelcards/MGBERT.PNG b/MindSPONGE/docs/modelcards/MGBERT.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..b549befa71cd887f15dce5b2997c1eeaac6061a2
Binary files /dev/null and b/MindSPONGE/docs/modelcards/MGBERT.PNG differ
diff --git a/MindSPONGE/docs/modelcards/ProteinMPNN.PNG b/MindSPONGE/docs/modelcards/ProteinMPNN.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..134eb0f89ee232b859b376118c0b16571cab15c8
Binary files /dev/null and b/MindSPONGE/docs/modelcards/ProteinMPNN.PNG differ
diff --git a/MindSPONGE/docs/modelcards/illustration.png b/MindSPONGE/docs/modelcards/illustration.png
new file mode 100644
index 0000000000000000000000000000000000000000..2d24b0ce90fc424bc01e8e8330f61582052090f6
Binary files /dev/null and b/MindSPONGE/docs/modelcards/illustration.png differ
diff --git a/MindSPONGE/docs/modelcards/node_gtransformer.png b/MindSPONGE/docs/modelcards/node_gtransformer.png
new file mode 100644
index 0000000000000000000000000000000000000000..cbde9e1be58a86258d76b81667413cead759f0bb
Binary files /dev/null and b/MindSPONGE/docs/modelcards/node_gtransformer.png differ
diff --git a/MindSPONGE/docs/modelcards/structure_transformer.png b/MindSPONGE/docs/modelcards/structure_transformer.png
new file mode 100644
index 0000000000000000000000000000000000000000..322285daf711c11264d91c4df5c52ed31cac068e
Binary files /dev/null and b/MindSPONGE/docs/modelcards/structure_transformer.png differ
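The `__init__.py` change that follows replaces the hand-rolled zero-padding comparison with `distutils.version.LooseVersion`, which copes with version strings of different lengths directly. A standalone illustration of the same check; the version strings are placeholders, and note that `distutils` is deprecated in recent Python releases, where `packaging.version.Version` is the usual substitute:

    from distutils.version import LooseVersion

    required = '2.0.0'
    for current in ('1.10.1', '2.0.0', '2.1.0'):
        if LooseVersion(current) < LooseVersion(required):
            print(f'{current}: please upgrade MindSpore to >= {required}')
        else:
            print(f'{current}: OK')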
diff --git a/MindSPONGE/mindsponge/python/__init__.py b/MindSPONGE/mindsponge/python/__init__.py
index 998b5e17325189259f7757cbe70492b247cd580f..971330b0005df230a07acde2b966967e8999263d 100644
--- a/MindSPONGE/mindsponge/python/__init__.py
+++ b/MindSPONGE/mindsponge/python/__init__.py
@@ -23,6 +23,7 @@
"""MindSPONGE"""
import time
+from distutils.version import LooseVersion
def _mindspore_version_check():
@@ -45,15 +46,10 @@ def _mindspore_version_check():
"MindSpore before using MindSpore Mindsponge, by following "
"the instruction at https://www.mindspore.cn/install")
- ms_version = ms.__version__[:5]
+ ms_version = ms.__version__
required_mindspore_version = '2.0.0'
logger.info("Current Mindspore version is {}".format(ms_version))
- ms_version = list(map(int, ms_version.split('.')))
- required_mindspore = list(map(int, required_mindspore_version.split('.')))
- max_len = max(len(ms_version), len(required_mindspore))
- ms_version += [0] * (max_len - len(ms_version))
- required_mindspore += [0] * (max_len - len(required_mindspore))
- if ms_version < required_mindspore:
+ if LooseVersion(ms_version) < LooseVersion(required_mindspore_version):
logger.warning("Current version of MindSpore is not compatible with MindSPONGE. "
"Some functions might not work or even raise error. Please install MindSpore "
"version >= {} For more details about dependency setting, please check "
diff --git a/MindSPONGE/mindsponge/python/cell/__init__.py b/MindSPONGE/mindsponge/python/cell/__init__.py
index 67530492c69c94f76bc9625070695c1a20d8a481..ccb06b1c2f0a2b8928bdd5e18538606e44875c0c 100644
--- a/MindSPONGE/mindsponge/python/cell/__init__.py
+++ b/MindSPONGE/mindsponge/python/cell/__init__.py
@@ -1,20 +1,7 @@
-# Copyright 2021 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""init"""
+'''init'''
from .basic import Attention, GlobalAttention
-from .msa import MSARowAttentionWithPairBias, MSAColumnAttention, MSAColumnGlobalAttention
+from .msa import MSARowAttentionWithPairBias, MSAColumnAttention, MSAColumnGlobalAttention, \
+ MSARowAttentionWithPairBiasContact
from .triangle import TriangleAttention, TriangleMultiplication, OuterProductMean
from .equivariant import InvariantPointAttention
from .transition import Transition
@@ -22,4 +9,4 @@ from .transition import Transition
__all__ = ['Attention', 'GlobalAttention', 'MSARowAttentionWithPairBias',
'MSAColumnAttention', 'MSAColumnGlobalAttention',
'TriangleAttention', 'TriangleMultiplication', 'OuterProductMean',
- 'InvariantPointAttention', 'Transition']
+ 'InvariantPointAttention', 'Transition', 'MSARowAttentionWithPairBiasContact']
diff --git a/MindSPONGE/mindsponge/python/cell/basic.py b/MindSPONGE/mindsponge/python/cell/basic.py
index baf2185d8a0138c45a8f170f013b48c5cf3c577f..2c4391422fae7bb0cf249555abcbf24ad477d3b5 100644
--- a/MindSPONGE/mindsponge/python/cell/basic.py
+++ b/MindSPONGE/mindsponge/python/cell/basic.py
@@ -54,7 +54,7 @@ class Attention(nn.Cell):
- **m_data** (Tensor) - The key/value tensor with shape :math:`(batch\_size,
value\_seq_length, m\_data_dim)` with value_seq_length the value sequence length.
- **attention_mask** (Tensor) - The mask for attention matrix with shape
- :math:`(batch\_size, num\_head, query\_seq_length, value_seq_length)`.
+ :math:`(batch\_size, num\_head, query\_seq_length, value\_seq_length)`.
- **index** (Tensor) - The index of while loop, only used in case of while
control flow. Default: ``None``.
- **nonbatched_bias** (Tensor) - Non-batched bias for the attention matrix with
@@ -236,18 +236,18 @@ class GlobalAttention(nn.Cell):
flow. Default: ``None``.
Inputs:
- - **q_data** (Tensor) - The query tensor with shape :math:`(batch_size, seq_length,
- input_dim)` with seq_length the sequence length.
- - **m_data** (Tensor) - The key/value tensor with shape :math:`(batch_size, seq_length,
- input_dim)`.
- - **q_mask** (Tensor) - A binary mask for q_data of shape :math:`(batch_size,
- seq_length, 1)`.
+ - **q_data** (Tensor) - The query tensor with shape :math:`(batch\_size, seq\_length,
+ input\_dim)` with seq_length the sequence length.
+ - **m_data** (Tensor) - The key/value tensor with shape :math:`(batch\_size, seq\_length,
+ input\_dim)`.
+ - **q_mask** (Tensor) - A binary mask for q_data of shape :math:`(batch\_size,
+ seq\_length, 1)`.
- **bias** (Tensor) - Bias for the attention matrix. Default: ``None``.
- **index** (Tensor) - The index of while loop, only used in case of while control
flow. Default: ``None``.
Outputs:
- Tensor, Output tensor of the GlobalAttention layer with shape :math:`(batch_size, seq_length, output_dim)`.
+ Tensor, Output tensor of the GlobalAttention layer with shape :math:`(batch\_size, seq\_length, output\_dim)`.
Supported Platforms:
``Ascend`` ``GPU``
diff --git a/MindSPONGE/mindsponge/python/cell/equivariant.py b/MindSPONGE/mindsponge/python/cell/equivariant.py
index 32d6358eefa0bf8278587a8d5c4bc526e8144e7d..e66cece3127714bfca35efd4b94757c7abc2da17 100644
--- a/MindSPONGE/mindsponge/python/cell/equivariant.py
+++ b/MindSPONGE/mindsponge/python/cell/equivariant.py
@@ -59,9 +59,9 @@ class InvariantPointAttention(nn.Cell):
shape :math:`[N_{res}, N_{res}, pair\_dim]`.
- **mask** (Tensor) - A mask that determines which elements of inputs_1d are involved in the
attention calculation, shape :math:`[N_{res}, 1]`
- - **rotation** (tuple) - A rotation term in a rigid body group :math:`T(r,t)`,
+ - **rotation** (tuple) - A rotation term in a rigid body group T(r,t),
A tuple of length 9, The shape of each elements in the tuple is :math:`[N_{res}]`.
- - **translation** (tuple) - A translation term in a rigid body group :math:`T(r,t)`,
+ - **translation** (tuple) - A translation term in a rigid body group T(r,t),
A tuple of length 3, The shape of each elements in the tuple is :math:`[N_{res}]`.
Outputs:
@@ -72,11 +72,11 @@ class InvariantPointAttention(nn.Cell):
Examples:
>>> import numpy as np
- >>> import mindspore as ms
>>> from mindsponge.cell import InvariantPointAttention
>>> from mindspore import dtype as mstype
>>> from mindspore import Tensor
- >>> ms.set_context(mode=ms.GRAPH_MODE)
+ >>> import mindspore.context as context
+ >>> context.set_context(mode=context.GRAPH_MODE)
>>> model = InvariantPointAttention(num_head=12, num_scalar_qk=16, num_scalar_v=16,
... num_point_v=8, num_point_qk=4,
... num_channel=384, pair_dim=128)
@@ -103,19 +103,19 @@ class InvariantPointAttention(nn.Cell):
self.projection_num = self.num_head * self.num_scalar_v + self.num_head * self.num_point_v * 4 + \
self.num_head * pair_dim
self.q_scalar = nn.Dense(self.num_channel, self.num_head * self.num_scalar_qk,
- weight_init=lecun_init(self.num_channel))
+ weight_init=lecun_init(self.num_channel), bias_init="zeros")
self.kv_scalar = nn.Dense(self.num_channel, self.num_head * (self.num_scalar_qk + self.num_scalar_v),
- weight_init=lecun_init(self.num_channel))
+ weight_init=lecun_init(self.num_channel), bias_init="zeros")
self.q_point_local = nn.Dense(self.num_channel, self.num_head * 3 * self.num_point_qk,
- weight_init=lecun_init(self.num_channel)
+ weight_init=lecun_init(self.num_channel), bias_init="zeros"
)
self.kv_point_local = nn.Dense(self.num_channel, self.num_head * 3 * (self.num_point_qk + self.num_point_v),
- weight_init=lecun_init(self.num_channel))
+ weight_init=lecun_init(self.num_channel), bias_init="zeros")
self.soft_max = nn.Softmax()
self.soft_plus = ops.Softplus()
self.trainable_point_weights = Parameter(Tensor(np.ones((12,)), mstype.float32), name="trainable_point_weights")
- self.attention_2d = nn.Dense(pair_dim, self.num_head, weight_init=lecun_init(pair_dim))
- self.output_projection = nn.Dense(self.projection_num, self.num_channel, weight_init='zeros'
+ self.attention_2d = nn.Dense(pair_dim, self.num_head, weight_init=lecun_init(pair_dim), bias_init='zeros')
+ self.output_projection = nn.Dense(self.projection_num, self.num_channel, weight_init='zeros', bias_init='zeros'
)
self.scalar_weights = Tensor(np.sqrt(1.0 / (3 * 16)).astype(np.float32))
self.point_weights = Tensor(np.sqrt(1.0 / (3 * 18)).astype(np.float32))
@@ -179,15 +179,15 @@ class InvariantPointAttention(nn.Cell):
k_point2, v_point2 = mnp.split(kv_point_global2, [num_point_qk], axis=-1)
trainable_point_weights = self.soft_plus(self.trainable_point_weights)
- point_weights = self.point_weights * mnp.expand_dims(trainable_point_weights, axis=1)
+ point_weights = self.point_weights * ops.expand_dims(trainable_point_weights, axis=1)
v_point = [mnp.swapaxes(v_point0, -2, -3), mnp.swapaxes(v_point1, -2, -3), mnp.swapaxes(v_point2, -2, -3)]
q_point = [mnp.swapaxes(q_point0, -2, -3), mnp.swapaxes(q_point1, -2, -3), mnp.swapaxes(q_point2, -2, -3)]
k_point = [mnp.swapaxes(k_point0, -2, -3), mnp.swapaxes(k_point1, -2, -3), mnp.swapaxes(k_point2, -2, -3)]
- dist2 = mnp.square(ops.expand_dims(q_point[0], 2) - ops.expand_dims(k_point[0], 1)) + \
- mnp.square(ops.expand_dims(q_point[1], 2) - ops.expand_dims(k_point[1], 1)) + \
- mnp.square(ops.expand_dims(q_point[2], 2) - ops.expand_dims(k_point[2], 1))
+ dist2 = ops.Square()(ops.expand_dims(q_point[0], 2) - ops.expand_dims(k_point[0], 1)) + \
+ ops.Square()(ops.expand_dims(q_point[1], 2) - ops.expand_dims(k_point[1], 1)) + \
+ ops.Square()(ops.expand_dims(q_point[2], 2) - ops.expand_dims(k_point[2], 1))
attn_qk_point = -0.5 * mnp.sum(ops.expand_dims(ops.expand_dims(point_weights, 1), 1) * dist2, axis=-1)
@@ -230,9 +230,9 @@ class InvariantPointAttention(nn.Cell):
output_feature22 = result_point_local[2]
output_feature3 = mnp.sqrt(self._dist_epsilon +
- mnp.square(result_point_local[0]) +
- mnp.square(result_point_local[1]) +
- mnp.square(result_point_local[2]))
+ ops.Square()(result_point_local[0]) +
+ ops.Square()(result_point_local[1]) +
+ ops.Square()(result_point_local[2]))
result_attention_over_2d = ops.matmul(mnp.swapaxes(attn, 0, 1), inputs_2d)
num_out = num_head * result_attention_over_2d.shape[-1]
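Two of the changes above are mechanical: every `nn.Dense` now states `bias_init="zeros"` explicitly (which matches MindSpore's default), and `mnp.square` is replaced by the `ops.Square` primitive. A quick sanity sketch of both APIs, independent of the attention code:

    import numpy as np
    import mindspore as ms
    from mindspore import nn, ops

    dense = nn.Dense(8, 4, weight_init='normal', bias_init='zeros')   # explicit zero bias
    x = ms.Tensor(np.random.randn(2, 8), ms.float32)
    y = dense(x)

    # ops.Square() is the primitive form of element-wise squaring.
    assert np.allclose(ops.Square()(y).asnumpy(), (y * y).asnumpy(), atol=1e-6)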
diff --git a/MindSPONGE/mindsponge/python/cell/msa.py b/MindSPONGE/mindsponge/python/cell/msa.py
index 50d0f313fefa624d9a2afedd8935e4e0b162a04d..330850b43281ac345bb05d43389caa12beb975a2 100644
--- a/MindSPONGE/mindsponge/python/cell/msa.py
+++ b/MindSPONGE/mindsponge/python/cell/msa.py
@@ -357,3 +357,93 @@ class MSAColumnGlobalAttention(nn.Cell):
"""
msa_act = self.attn_mod(msa_act, msa_act, msa_mask, index)
return msa_act
+
+
+class MSARowAttentionWithPairBiasContact(nn.Cell):
+    '''MSA row attention with pair bias and contact information bias'''
+
+ def __init__(self, num_head, key_dim, gating, msa_act_dim, pair_act_dim, batch_size=None, slice_num=0):
+ super(MSARowAttentionWithPairBiasContact, self).__init__()
+ self.num_head = num_head
+ self.batch_size = batch_size
+ self.norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5)
+ self.matmul = P.MatMul(transpose_b=True)
+ self.attn_mod = Attention(num_head, key_dim, gating, msa_act_dim, msa_act_dim, msa_act_dim, batch_size)
+ self.msa_act_dim = msa_act_dim
+ self.pair_act_dim = pair_act_dim
+ self.batch_size = batch_size
+ self.slice_num = slice_num
+ self.idx = Tensor(0, mstype.int32)
+ self.masked_layer_norm = MaskedLayerNorm()
+ self._init_parameter()
+
+ def construct(self, msa_act, msa_mask, pair_act, contact_act, contact_info_mask, index):
+ '''construct'''
+ query_norm_gamma = P.Gather()(self.query_norm_gammas, index, 0)
+ query_norm_beta = P.Gather()(self.query_norm_betas, index, 0)
+ feat_2d_norm_gamma = P.Gather()(self.feat_2d_norm_gammas, index, 0)
+ feat_2d_norm_beta = P.Gather()(self.feat_2d_norm_betas, index, 0)
+ feat_2d_weight = P.Cast()(P.Gather()(self.feat_2d_weights, index, 0), mstype.float16)
+ contact_norm_gamma = P.Gather()(self.contact_norm_gammas, index, 0)
+ contact_norm_beta = P.Gather()(self.contact_norm_betas, index, 0)
+ contact_weight = P.Cast()(P.Gather()(self.contact_weights, index, 0), mstype.float16)
+
+ q, k, _ = pair_act.shape
+ msa_mask = P.Cast()(msa_mask, mstype.float32)
+ bias = 1e9 * (msa_mask - 1.0)
+ bias = P.ExpandDims()(P.ExpandDims()(bias, 1), 2)
+
+ msa_act = P.Cast()(msa_act, mstype.float32)
+ pair_act = P.Cast()(pair_act, mstype.float32)
+ msa_act, _, _ = self.norm(msa_act, query_norm_gamma, query_norm_beta)
+ pair_act, _, _ = self.norm(pair_act, feat_2d_norm_gamma, feat_2d_norm_beta)
+ msa_act = P.Cast()(msa_act, mstype.float16)
+ pair_act = P.Cast()(pair_act, mstype.float16)
+ pair_act = P.Reshape()(pair_act, (-1, pair_act.shape[-1]))
+ pair_act_bias = P.Transpose()(P.Reshape()(self.matmul(pair_act, feat_2d_weight), (q, k, self.num_head)),
+ (2, 0, 1))
+
+ contact_act = P.Cast()(contact_act, mstype.float32)
+ contact_act, _, _ = self.norm(contact_act, contact_norm_gamma, contact_norm_beta)
+ contact_act = P.Cast()(contact_act, mstype.float16)
+ contact_act = P.Reshape()(contact_act, (-1, contact_act.shape[-1]))
+ contact_act_bias = P.Transpose()(P.Reshape()(self.matmul(contact_act, contact_weight), (q, k, self.num_head)),
+ (2, 0, 1))
+ contact_act_bias = contact_act_bias * contact_info_mask[None, :, :]
+
+ nonbatched_bias = pair_act_bias + contact_act_bias
+ batched_inputs = (msa_act, bias)
+
+ nonbatched_inputs = (index, nonbatched_bias)
+
+ msa_act = _memory_reduce(self._compute, batched_inputs, nonbatched_inputs, self.slice_num)
+ return msa_act
+
+ def _init_parameter(self):
+ '''init parameter'''
+ self.query_norm_gammas = Parameter(Tensor(np.zeros([self.batch_size, self.msa_act_dim,]), mstype.float32))
+ self.query_norm_betas = Parameter(Tensor(np.zeros([self.batch_size, self.msa_act_dim,]), mstype.float32))
+ self.feat_2d_norm_gammas = Parameter(Tensor(np.zeros([self.batch_size, self.pair_act_dim,]), mstype.float32))
+ self.feat_2d_norm_betas = Parameter(Tensor(np.zeros([self.batch_size, self.pair_act_dim,]), mstype.float32))
+ self.feat_2d_weights = Parameter(
+ Tensor(np.zeros([self.batch_size, self.num_head, self.pair_act_dim]), mstype.float32))
+
+ self.contact_norm_gammas = Parameter(Tensor(np.ones([self.batch_size, 32,]), mstype.float32))
+ self.contact_norm_betas = Parameter(Tensor(np.zeros([self.batch_size, 32,]), mstype.float32))
+ self.contact_weights = Parameter(Tensor(np.zeros([self.batch_size, self.num_head, 32]), mstype.float32))
+
+ def _compute(self, msa_act, mask, index, nonbatched_bias):
+ """
+ compute.
+
+ Args:
+ msa_act (Tensor): Tensor of msa_act.
+ mask (Tensor): The mask for MSA row attention matrix.
+ index (Tensor): The index of while loop, only used in case of while control flow. Default: None
+            nonbatched_bias (Tensor): Tensor of the non-batched bias matrix.
+
+ Outputs:
+            - **msa_act** (Tensor) - The float tensor of msa_act returned by the attention layer.
+ """
+ msa_act = self.attn_mod(msa_act, msa_act, mask, index, nonbatched_bias)
+ return msa_act
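A shape-oriented sketch of driving the new `MSARowAttentionWithPairBiasContact` block. The sizes are illustrative; the 32-channel contact features and the scalar gather index mirror the parameter shapes in `_init_parameter`, and `batch_size` must be an integer because the parameters are allocated with a leading batch dimension. This sketch has not been validated on a specific MindSpore build:

    import numpy as np
    from mindspore import Tensor
    from mindspore import dtype as mstype
    from mindsponge.cell import MSARowAttentionWithPairBiasContact

    n_seq, n_res, msa_dim, pair_dim = 4, 16, 64, 128
    block = MSARowAttentionWithPairBiasContact(num_head=8, key_dim=8, gating=True,
                                               msa_act_dim=msa_dim, pair_act_dim=pair_dim,
                                               batch_size=1)

    msa_act = Tensor(np.random.randn(n_seq, n_res, msa_dim), mstype.float32)
    msa_mask = Tensor(np.ones((n_seq, n_res)), mstype.float32)
    pair_act = Tensor(np.random.randn(n_res, n_res, pair_dim), mstype.float32)
    contact_act = Tensor(np.random.randn(n_res, n_res, 32), mstype.float32)   # 32 contact channels
    contact_mask = Tensor(np.ones((n_res, n_res)), mstype.float32)
    index = Tensor(0, mstype.int32)   # selects the parameter slice for this layer

    out = block(msa_act, msa_mask, pair_act, contact_act, contact_mask, index)
    print(out.shape)                  # expected (n_seq, n_res, msa_dim)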
diff --git a/MindSPONGE/mindsponge/python/cell/triangle.py b/MindSPONGE/mindsponge/python/cell/triangle.py
index 12185cbbb40eeab6bb9dd44823ae8d6d25f06a8e..8e3ce20e9779b95fa4e0d0256d76cca5262c3247 100644
--- a/MindSPONGE/mindsponge/python/cell/triangle.py
+++ b/MindSPONGE/mindsponge/python/cell/triangle.py
@@ -156,13 +156,13 @@ class TriangleMultiplication(nn.Cell):
batch_size (int): The batch size of parameters in triangle multiplication. Default: ``None``.
Inputs:
- - **pair_act** (Tensor) - Tensor of pair_act. shape :math:`(N{res}, N{res}, layer\_norm\_dim)`.
- - **pair_mask** (Tensor) - The mask for TriangleAttention matrix with shape. shape :math:`(N{res}, N{res})`.
+        - **pair_act** (Tensor) - Tensor of pair_act with shape :math:`(N_{res}, N_{res}, layer\_norm\_dim)`.
+        - **pair_mask** (Tensor) - The mask for the TriangleAttention matrix, with shape :math:`(N_{res}, N_{res})`.
- **index** (Tensor) - The index of while loop, only used in case of while control
flow.
Outputs:
- Tensor, the float tensor of the pair_act of the layer with shape :math:`(N{res}, N{res}, layer\_norm\_dim)`.
+ Tensor, the float tensor of the pair_act of the layer with shape :math:`(N_{res}, N_{res}, layer\_norm\_dim)`.
Supported Platforms:
``Ascend`` ``GPU``
@@ -329,9 +329,9 @@ class TriangleMultiplication(nn.Cell):
self.right_gate_biases = Parameter(
Tensor(np.zeros((self.batch_size, self.num_intermediate_channel)), mstype.float32))
self.center_layer_norm_gammas = Parameter(
- Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32))
+ Tensor(np.zeros((self.batch_size, self.num_intermediate_channel)), mstype.float32))
self.center_layer_norm_betas = Parameter(
- Tensor(np.zeros((self.batch_size, self.layer_norm_dim)), mstype.float32))
+ Tensor(np.zeros((self.batch_size, self.num_intermediate_channel)), mstype.float32))
self.output_projection_weights = Parameter(
Tensor(np.zeros((self.batch_size, self.layer_norm_dim, self.layer_norm_dim)), mstype.float32))
self.output_projection_biases = Parameter(
@@ -359,8 +359,8 @@ class TriangleMultiplication(nn.Cell):
self.right_gate_weights = Parameter(
Tensor(np.zeros((self.num_intermediate_channel, self.layer_norm_dim)), mstype.float32))
self.right_gate_biases = Parameter(Tensor(np.ones((self.num_intermediate_channel)), mstype.float32))
- self.center_layer_norm_gammas = Parameter(Tensor(np.ones((self.layer_norm_dim)), mstype.float32))
- self.center_layer_norm_betas = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32))
+ self.center_layer_norm_gammas = Parameter(Tensor(np.ones((self.num_intermediate_channel)), mstype.float32))
+ self.center_layer_norm_betas = Parameter(Tensor(np.zeros((self.num_intermediate_channel)), mstype.float32))
self.output_projection_weights = Parameter(
Tensor(np.zeros((self.layer_norm_dim, self.layer_norm_dim)), mstype.float32))
self.output_projection_biases = Parameter(Tensor(np.zeros((self.layer_norm_dim)), mstype.float32))
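The shape correction above aligns the centre layer norm's gamma and beta with the number of channels the normalization is actually applied to (`num_intermediate_channel`); `P.LayerNorm` requires its affine parameters to match the normalized dimension. A minimal illustration of that constraint, with an arbitrary channel size:

    import numpy as np
    import mindspore as ms
    from mindspore.ops import operations as P

    num_intermediate_channel = 64
    layer_norm = P.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-5)

    act = ms.Tensor(np.random.randn(8, 8, num_intermediate_channel), ms.float32)
    gamma = ms.Tensor(np.ones(num_intermediate_channel), ms.float32)    # must match the last dim
    beta = ms.Tensor(np.zeros(num_intermediate_channel), ms.float32)
    out, _, _ = layer_norm(act, gamma, beta)
    print(out.shape)   # (8, 8, 64)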
diff --git a/MindSPONGE/mindsponge/python/colvar/atoms/group.py b/MindSPONGE/mindsponge/python/colvar/atoms/group.py
index d87924fb23f4f7643bfeb24469ace6d6f7685806..b69b6cb03a3f1fc7e86009aaaab7277ecff2d1bc 100644
--- a/MindSPONGE/mindsponge/python/colvar/atoms/group.py
+++ b/MindSPONGE/mindsponge/python/colvar/atoms/group.py
@@ -114,7 +114,7 @@ class Group(AtomsBase):
atoms_.append(a)
- self.atoms = CellList(atoms_)
+ self.atoms: List[AtomsBase] = CellList(atoms_)
shape = shape[:axis] + (dim,) + shape[axis:]
self._shape = shape[1:]
diff --git a/MindSPONGE/mindsponge/python/colvar/basic/angle.py b/MindSPONGE/mindsponge/python/colvar/basic/angle.py
index 09e0d991ac66e120bc23d8b0ce07c372f86aad97..157896f989df249da2de5929da46c00a454cefc3 100644
--- a/MindSPONGE/mindsponge/python/colvar/basic/angle.py
+++ b/MindSPONGE/mindsponge/python/colvar/basic/angle.py
@@ -24,6 +24,8 @@
Collective variables by position
"""
+from inspect import signature
+
from mindspore import Tensor
from mindspore import ops, nn
from mindspore.ops import functional as F
@@ -115,6 +117,7 @@ class Angle(Colvar):
raise ValueError('The atoms and vector cannot be used at same time!')
axis = get_integer(axis)
+ self.keepdims = keepdims
self.atoms = None
self.vector1 = None
@@ -161,40 +164,44 @@ class Angle(Colvar):
# (..., D)
shape = check_broadcast(self.vector1.shape, self.vector2.shape)
- if keepdims is None:
+ if self.keepdims is None:
if len(shape) > 1:
- keepdims = False
+ self.keepdims = False
else:
- keepdims = True
+ self.keepdims = True
# (...)
shape = shape[:-1]
- if keepdims:
+ if self.keepdims:
# (..., 1)
shape += (1,)
self._set_shape(shape)
else:
- if keepdims is None:
+ if self.keepdims is None:
if self.atoms.ndim > 2:
- keepdims = False
+ self.keepdims = False
else:
- keepdims = True
+ self.keepdims = True
# (1, ..., 3, D)
shape = (1,) + self.atoms.shape
# (1, ..., D)
shape = shape[:axis] + shape[axis+1:]
# (...)
shape = shape[1:-1]
- if keepdims:
+ if self.keepdims:
# (..., 1)
shape += (1,)
self._set_shape(shape)
self.squeeze = ops.Squeeze(axis)
- self.norm = nn.Norm(-1, keepdims)
- self.reduce_sum = ops.ReduceSum(keepdims)
+ self.norm_last_dim = None
+ # MindSpore < 2.0.0-rc1
+ if 'ord' not in signature(ops.norm).parameters.keys():
+ self.norm_last_dim = nn.Norm(-1, self.keepdims)
+
+ self.reduce_sum = ops.ReduceSum(self.keepdims)
def construct(self, coordinate: Tensor, pbc_box: bool = None):
r"""calculate angle.
@@ -228,8 +235,12 @@ class Angle(Colvar):
vector2 = self.get_vector(pos_b, pos_c, pbc_box)
# (B, ...) or (B, ..., 1) <- (B, ..., D)
- dis1 = self.norm(vector1)
- dis2 = self.norm(vector2)
+ if self.norm_last_dim is None:
+ dis1 = ops.norm(vector1, None, -1, self.keepdims)
+ dis2 = ops.norm(vector2, None, -1, self.keepdims)
+ else:
+ dis1 = self.norm_last_dim(vector1)
+ dis2 = self.norm_last_dim(vector2)
dot12 = self.reduce_sum(vector1*vector2, -1)
# (B, ...) or (B, ..., 1)
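The pattern introduced above is a feature test: on releases where `ops.norm` has no `ord` parameter (MindSpore earlier than 2.0.0-rc1, per the comment) the code falls back to `nn.Norm`, otherwise it calls the functional form positionally as `(input, ord, dim, keepdim)`. The same test in isolation:

    from inspect import signature

    import numpy as np
    import mindspore as ms
    from mindspore import nn, ops

    x = ms.Tensor(np.random.randn(4, 3), ms.float32)

    if 'ord' in signature(ops.norm).parameters:
        dist = ops.norm(x, None, -1, True)    # newer functional signature
    else:
        dist = nn.Norm(-1, True)(x)           # older releases keep nn.Norm
    print(dist.shape)                          # (4, 1)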
diff --git a/MindSPONGE/mindsponge/python/colvar/basic/distance.py b/MindSPONGE/mindsponge/python/colvar/basic/distance.py
index 987d46c6b557fae65478bbb5ff126ef8d77afce8..aa6920a6e69484783631b112535262ee7e232481 100644
--- a/MindSPONGE/mindsponge/python/colvar/basic/distance.py
+++ b/MindSPONGE/mindsponge/python/colvar/basic/distance.py
@@ -24,8 +24,10 @@
Collective variables by position
"""
+from inspect import signature
+
from mindspore import Tensor
-from mindspore import nn
+from mindspore import nn, ops
from ..colvar import Colvar
from ..atoms import AtomsBase, Vector
@@ -124,7 +126,12 @@ class Distance(Colvar):
shape += (1,)
self._set_shape(shape)
- self.norm_last_dim = nn.Norm(-1, keepdims)
+ self.keepdims = keepdims
+
+ self.norm_last_dim = None
+ # MindSpore < 2.0.0-rc1
+ if 'ord' not in signature(ops.norm).parameters.keys():
+ self.norm_last_dim = nn.Norm(-1, self.keepdims)
def get_unit(self, units: Units = None) -> str:
"""return unit of the collective variables"""
@@ -147,4 +154,7 @@ class Distance(Colvar):
vector = self.vector(coordinate, pbc_box)
# (B, ...) or (B, ..., 1)
+ if self.norm_last_dim is None:
+ return ops.norm(vector, None, -1, self.keepdims)
+
return self.norm_last_dim(vector)
diff --git a/MindSPONGE/mindsponge/python/colvar/basic/torsion.py b/MindSPONGE/mindsponge/python/colvar/basic/torsion.py
index a0548da40b6a635d2d63dd699358f72ea7b76af0..bb17c5998bc433353102a1c89593dc58639e9833 100644
--- a/MindSPONGE/mindsponge/python/colvar/basic/torsion.py
+++ b/MindSPONGE/mindsponge/python/colvar/basic/torsion.py
@@ -24,6 +24,8 @@
Collective variables by position
"""
+from inspect import signature
+
import mindspore.numpy as msnp
from mindspore import Tensor
from mindspore import ops, nn
@@ -215,11 +217,17 @@ class Torsion(Colvar):
self.squeeze = ops.Squeeze(axis)
- if self.atoms is None and self.axis_vector is None:
- self.norm = nn.Norm(-1, keepdims)
- else:
- self.norm = nn.Norm(-1, True)
self.reduce_sum = ops.ReduceSum(keepdims)
+
+ self.keepdims = True
+ if self.atoms is None and self.axis_vector is None:
+ self.keepdims = keepdims
+
+ self.norm_last_dim = None
+ # MindSpore < 2.0.0-rc1
+ if 'ord' not in signature(ops.norm).parameters.keys():
+ self.norm_last_dim = nn.Norm(-1, self.keepdims)
+
self.atan2 = ops.Atan2()
def construct(self, coordinate: Tensor, pbc_box: bool = None):
@@ -260,8 +268,12 @@ class Torsion(Colvar):
if self.atoms is None and self.axis_vector is None:
# (B, ...) or (B, ..., 1) <- (B, ..., D)
- dis1 = self.norm(vector1)
- dis2 = self.norm(vector2)
+ if self.norm_last_dim is None:
+ dis1 = ops.norm(vector1, None, -1, self.keepdims)
+ dis2 = ops.norm(vector2, None, -1, self.keepdims)
+ else:
+ dis1 = self.norm_last_dim(vector1)
+ dis2 = self.norm_last_dim(vector2)
dot12 = self.reduce_sum(vector1*vector2, -1)
# (B, ...) or (B, ..., 1)
@@ -274,8 +286,14 @@ class Torsion(Colvar):
vec_b = msnp.cross(vector2, axis_vector)
cross_ab = msnp.cross(vec_a, vec_b)
+ # (B, ..., 1) <- (B, ..., D)
+ if self.norm_last_dim is None:
+ axis_dis = ops.norm(axis_vector, None, -1, self.keepdims)
+ else:
+ axis_dis = self.norm_last_dim(axis_vector)
+
# (B, ..., D) = (B, ..., D) / (B, ...,1)
- axis_vector *= msnp.reciprocal(self.norm(axis_vector))
+ axis_vector *= msnp.reciprocal(axis_dis)
# (B, ...) or (B, ..., 1)
sin_phi = self.reduce_sum(axis_vector*cross_ab, -1)
diff --git a/MindSPONGE/mindsponge/python/colvar/combine.py b/MindSPONGE/mindsponge/python/colvar/combine.py
index 161a9d09c6c3366d0804a1ba64b9087732865882..392186f71c03824290609b090de943274953b570 100644
--- a/MindSPONGE/mindsponge/python/colvar/combine.py
+++ b/MindSPONGE/mindsponge/python/colvar/combine.py
@@ -185,7 +185,7 @@ class ColvarCombine(Colvar):
colvar_.append(cv)
- self.colvar = CellList(colvar_)
+ self.colvar: List[Colvar] = CellList(colvar_)
self._shape = shape
self._ndim = len(self._shape)
diff --git a/MindSPONGE/mindsponge/python/colvar/group.py b/MindSPONGE/mindsponge/python/colvar/group.py
index d329f4882ecf4fe003fc7ada93e8365567a6df75..d03a2fe9072ca0d9688eafc314c99eac17a6c323 100644
--- a/MindSPONGE/mindsponge/python/colvar/group.py
+++ b/MindSPONGE/mindsponge/python/colvar/group.py
@@ -94,7 +94,7 @@ class ColvarGroup(Colvar):
periodic += (F.expand_dims(cv.periodic, 0),)
- self.colvar = CellList(colvar_)
+ self.colvar: List[Colvar] = CellList(colvar_)
if axis == -1:
shape = shape[:-1] + (dim,)
diff --git a/MindSPONGE/mindsponge/python/common/protein.py b/MindSPONGE/mindsponge/python/common/protein.py
index 23052200e78c1841b14b9bc7203002537bcedfbc..d261b161eafbdacb6336530d472047c670b6f645 100644
--- a/MindSPONGE/mindsponge/python/common/protein.py
+++ b/MindSPONGE/mindsponge/python/common/protein.py
@@ -102,7 +102,7 @@ def from_pdb_string(pdb_str: str, chain_id: Optional[str] = None) -> Protein:
res_shortname = residue_constants.restype_3to1.get(res.resname, 'X')
restype_idx = residue_constants.restype_order.get(
res_shortname, residue_constants.restype_num)
- #print(res_shortname, restype_idx)
+ # print(res_shortname, restype_idx)
pos = np.zeros((residue_constants.atom_type_num, 3))
mask = np.zeros((residue_constants.atom_type_num,))
res_b_factors = np.zeros((residue_constants.atom_type_num,))
@@ -360,6 +360,7 @@ def from_prediction_v2(final_atom_positions,
Returns:
A protein instance.
"""
+
def _maybe_remove_leading_dim(arr: np.ndarray) -> np.ndarray:
return arr[0] if remove_leading_feature_dimension else arr
@@ -377,3 +378,23 @@ def from_prediction_v2(final_atom_positions,
residue_index=residue_index + 1,
chain_index=chain_index,
b_factors=b_factors)
+
+
+def from_prediction_new(features) -> Protein:
+ """Assembles a protein from a prediction.
+
+ Args:
+        features: Sequence of model outputs, in order: aatype, residue_index,
+            atom_positions, atom_mask and per-residue b-factors.
+
+ Returns:
+ A protein instance.
+ """
+ dist_per_residue = features[4]
+
+ return Protein(
+ aatype=features[0],
+ atom_positions=features[2],
+ atom_mask=features[3],
+ residue_index=features[1] + 1,
+ b_factors=dist_per_residue)
diff --git a/MindSPONGE/mindsponge/python/control/barostat/andersen.py b/MindSPONGE/mindsponge/python/control/barostat/andersen.py
index 654e7e404ab793a48f4bf723021c7444e79dc7a2..b28264ad63d5804c1e9d6bd778a9a3178d8a88e0 100644
--- a/MindSPONGE/mindsponge/python/control/barostat/andersen.py
+++ b/MindSPONGE/mindsponge/python/control/barostat/andersen.py
@@ -30,6 +30,7 @@ from mindspore.ops import functional as F
from . import Barostat
from ...system import Molecule
+from ...function import get_arguments
class AndersenBarostat(Barostat):
@@ -71,6 +72,7 @@ class AndersenBarostat(Barostat):
control_step: int = 1,
compressibility: float = 4.6e-5,
time_constant: float = 1.,
+ **kwargs,
):
super().__init__(
@@ -81,6 +83,7 @@ class AndersenBarostat(Barostat):
compressibility=compressibility,
time_constant=time_constant,
)
+ self._kwargs = get_arguments(locals(), kwargs)
self.h_mass_inverse_0 = F.square(self.time_constant) / self.compressibility
diff --git a/MindSPONGE/mindsponge/python/control/barostat/barostat.py b/MindSPONGE/mindsponge/python/control/barostat/barostat.py
index a4931b8b211f3dae0eef2b39825b231bce45a5ef..122cb398e6f9c92b736f59bea13dacdf040ac100 100644
--- a/MindSPONGE/mindsponge/python/control/barostat/barostat.py
+++ b/MindSPONGE/mindsponge/python/control/barostat/barostat.py
@@ -33,7 +33,7 @@ from mindspore.ops import functional as F
from .. import Controller
from ...system import Molecule
-from ...function import get_ms_array
+from ...function import get_ms_array, get_arguments
class Barostat(Controller):
@@ -72,12 +72,14 @@ class Barostat(Controller):
control_step: int = 1,
compressibility: float = 4.6e-5,
time_constant: float = 1.,
+ **kwargs,
):
super().__init__(
system=system,
control_step=control_step,
)
+ self._kwargs = get_arguments(locals(), kwargs)
self.anisotropic = anisotropic
diff --git a/MindSPONGE/mindsponge/python/control/barostat/berendsen.py b/MindSPONGE/mindsponge/python/control/barostat/berendsen.py
index 6957558cda884a3d921d69438fd32c47568fbf46..6d668a2ff6cc5f6c7af95193e1caacb4eb2edfb7 100644
--- a/MindSPONGE/mindsponge/python/control/barostat/berendsen.py
+++ b/MindSPONGE/mindsponge/python/control/barostat/berendsen.py
@@ -31,6 +31,7 @@ from mindspore.ops import functional as F
from . import Barostat
from ...system import Molecule
+from ...function import get_arguments
class BerendsenBarostat(Barostat):
@@ -72,6 +73,7 @@ class BerendsenBarostat(Barostat):
control_step: int = 1,
compressibility: float = 4.6e-5,
time_constant: float = 1.,
+ **kwargs
):
super().__init__(
@@ -82,6 +84,7 @@ class BerendsenBarostat(Barostat):
compressibility=compressibility,
time_constant=time_constant,
)
+ self._kwargs = get_arguments(locals(), kwargs)
self.ratio = self.control_step * self.time_step / self.time_constant / 3.
diff --git a/MindSPONGE/mindsponge/python/control/constraint/constraint.py b/MindSPONGE/mindsponge/python/control/constraint/constraint.py
index e172a825805244fa1855b10767413acf58f3cd78..70c414e171dab8c519da91d0d329c6e86b60beec 100644
--- a/MindSPONGE/mindsponge/python/control/constraint/constraint.py
+++ b/MindSPONGE/mindsponge/python/control/constraint/constraint.py
@@ -34,6 +34,7 @@ from .. import Controller
from ...system import Molecule
from ...potential import PotentialCell
from ...function.operations import GetVector, GetDistance
+from ...function import get_arguments
class Constraint(Controller):
@@ -62,12 +63,14 @@ class Constraint(Controller):
system: Molecule,
bonds: Union[Tensor, str] = 'h-bonds',
potential: PotentialCell = None,
+ **kwargs,
):
super().__init__(
system=system,
control_step=1,
)
+ self._kwargs = get_arguments(locals(), kwargs)
if potential is None:
self.all_bonds = system.bond
diff --git a/MindSPONGE/mindsponge/python/control/constraint/lincs.py b/MindSPONGE/mindsponge/python/control/constraint/lincs.py
index aa41e58914801b76f693d7c1b44c4a3692e735b5..57515d4fba2d859d5dca9eb780ae3786bc40c51f 100644
--- a/MindSPONGE/mindsponge/python/control/constraint/lincs.py
+++ b/MindSPONGE/mindsponge/python/control/constraint/lincs.py
@@ -37,6 +37,7 @@ from . import Constraint
from ...system import Molecule
from ...potential import PotentialCell
from ...function.operations import GetShiftGrad
+from ...function import get_arguments
class Lincs(Constraint):
@@ -62,6 +63,7 @@ class Lincs(Constraint):
system: Molecule,
bonds: Union[Tensor, str] = 'h-bonds',
potential: PotentialCell = None,
+ **kwargs
):
super().__init__(
@@ -69,6 +71,8 @@ class Lincs(Constraint):
bonds=bonds,
potential=potential,
)
+ self._kwargs = get_arguments(locals(), kwargs)
+
#pylint: disable=invalid-name
# (A,A) <- (A,A)
diff --git a/MindSPONGE/mindsponge/python/control/controller.py b/MindSPONGE/mindsponge/python/control/controller.py
index 8dd023367a8752285dab72a18a263ac534665211..51957fc70ac91fa0839d475e2ec681b08f182e92 100644
--- a/MindSPONGE/mindsponge/python/control/controller.py
+++ b/MindSPONGE/mindsponge/python/control/controller.py
@@ -34,32 +34,49 @@ from mindspore.ops import functional as F
from ..system import Molecule
from ..function import functions as func
-from ..function.functions import get_integer, get_ms_array
+from ..function.functions import get_integer, get_ms_array, get_arguments
class Controller(Cell):
- r"""Base class for the controller module in MindSPONGE.
-
- The `Controller` used in `Updater` to control the values of seven variables during the simulation
- process: coordinate, velocity, force, energy, kinetics, virial and pbc_box.
+ r"""
+ Base class for the controller module in MindSPONGE.
+    The `Controller` is used in the `Updater` to control the values of seven variables during the
+    simulation process: coordinate, velocity, force, energy, kinetics, virial and pbc_box.
Args:
-
- system (Molecule): Simulation system
-
- control_step (int): Step interval for controller execution. Default: 1
+ system(Molecule): Simulation system
+ control_step(int): Step interval for controller execution. Default: 1
+
+ Inputs:
+ - **coordinate** (Tensor) - Tensor of shape `(B, A, D)`. Data type is float.
+ - **velocity** (Tensor) - Tensor of shape `(B, A, D)`. Data type is float.
+ - **force** (Tensor) - Tensor of shape `(B, A, D)`. Data type is float.
+ - **energy** (Tensor) - Tensor of shape `(B, 1)`. Data type is float.
+ - **kinetics** (Tensor) - Tensor of shape `(B, D)`. Data type is float.
+ - **virial** (Tensor) - Tensor of shape `(B, D)`. Data type is float.
+ - **pbc_box** (Tensor) - Tensor of shape `(B, D)`. Data type is float.
+ - **step** (int) - Simulation step. Default: 0
+
+ Outputs:
+ - coordinate, Tensor of shape `(B, A, D)`. Data type is float.
+ - velocity, Tensor of shape `(B, A, D)`. Data type is float.
+ - force, Tensor of shape `(B, A, D)`. Data type is float.
+ - energy, Tensor of shape `(B, 1)`. Data type is float.
+ - kinetics, Tensor of shape `(B, D)`. Data type is float.
+ - virial, Tensor of shape `(B, D)`. Data type is float.
+ - pbc_box, Tensor of shape `(B, D)`. Data type is float.
Supported Platforms:
-
``Ascend`` ``GPU``
-
"""
def __init__(self,
system: Molecule,
control_step: int = 1,
+ **kwargs,
):
super().__init__(auto_prefix=False)
+ self._kwargs = get_arguments(locals(), kwargs)
self.system = system
self.num_walker = self.system.num_walker
@@ -100,67 +117,72 @@ class Controller(Cell):
@property
def boltzmann(self) -> float:
+ """
+ Boltzmann constant in current unit.
+
+ Returns:
+ float, Boltzmann constant in current unit.
+ """
return self.units.boltzmann
def set_time_step(self, dt: float):
- r"""Set simulation time step
+ r"""
+ Set simulation time step.
Args:
- dt (float): Time step
-
+ dt(float): Time step.
"""
self.time_step = get_ms_array(dt, ms.float32)
return self
def set_degrees_of_freedom(self, dofs: int):
- """Set degrees of freedom (DOFs)
+ """
+ Set degrees of freedom (DOFs).
Args:
- dofs (int): Degrees of freedom
-
+ dofs(int): Degrees of freedom.
"""
self.degrees_of_freedom = get_integer(dofs)
return self
def update_coordinate(self, coordinate: Tensor) -> Tensor:
- r"""Update the coordinate of the simulation system
+ r"""
+ Update the coordinate of the simulation system.
Args:
- coordinate (Tensor): Tensor of atomic coordinates.
- The shape of the Tensor is `(B, A, D)`, and the data type is float.
+ coordinate(Tensor): Tensor of atomic coordinates. Tensor shape is `(B, A, D)`.
+ Data type is float.
Returns:
Tensor, has the same data type and shape as original `coordinate`.
-
"""
return F.assign(self._coordinate, coordinate)
def update_pbc_box(self, pbc_box: Tensor) -> Tensor:
- r"""Update the parameter of PBC box
+ r"""
+ Update the parameter of PBC box.
Args:
- pbc_box (Tensor): Tensor of PBC box.
- The shape of the Tensor is `(B, D)`, and the data type is float.
+ pbc_box(Tensor): Tensor of PBC box. Tensor shape is `(B, D)`.
+ Data type is float.
Returns:
Tensor, has the same data type and shape as original `pbc_box`.
-
"""
if self._pbc_box is None:
return pbc_box
return F.assign(self._pbc_box, pbc_box)
def get_kinetics(self, velocity: Tensor) -> Tensor:
- r"""Calculate kinetics according to velocity
+ r"""
+ Calculate kinetics according to velocity.
Args:
- velocity (Tensor): Tensor of atomic velocities.
- The shape of the Tensor is `(B, A, D)`, and the data type is float.
+ velocity(Tensor): Tensor of atomic velocities. Tensor shape is `(B, A, D)`.
+ Data type is float.
Returns:
- kinetics (Tensor): Tensor of kinetics.
- The shape of the Tensor is `(B, D)`, and the data type is float.
-
+ Tensor, Tensor of kinetics. Tensor shape is `(B, D)`. Data type is float.
"""
if velocity is None:
return None
@@ -171,15 +193,14 @@ class Controller(Cell):
return kinetics * self.kinetic_unit_scale
def get_temperature(self, kinetics: Tensor = None) -> Tensor:
- r"""Calculate temperature according to velocity
+ r"""
+ Calculate temperature according to velocity.
+
Args:
- kinetics (Tensor): Tensor of kinetics.
- The shape of the Tensor is `(B, D)`, and the data type is float.
+ kinetics(Tensor): Tensor of kinetics. Tensor shape is `(B, D)`. Data type is float. Default: None
Returns:
- temperature (Tensor): Tensor of temperature.
- The shape of the Tensor is `(B)`, and the data type is float.
-
+ Tensor, Tensor of temperature. The shape of the Tensor is `(B)`. Data type is float.
"""
if kinetics is None:
return None
@@ -188,16 +209,14 @@ class Controller(Cell):
return 2 * kinetics / self.degrees_of_freedom / self.boltzmann
def get_volume(self, pbc_box: Tensor) -> Tensor:
- r"""Calculate volume according to PBC box
+ r"""
+        Calculate volume according to PBC box.
Args:
- pbc_box (Tensor): Tensor of PBC box.
- The shape of the Tensor is `(B, D)`, and the data type is float.
+ pbc_box(Tensor): Tensor of PBC box. Tensor shape is `(B, D)`. Data type is float.
Returns:
- volume (Tensor): Tensor of volume.
- The shape of the Tensor is `(B)`, and the data type is float.
-
+ Tensor, Tensor of volume. The shape of the Tensor is `(B)`, and the data type is float.
"""
if self._pbc_box is None:
return None
@@ -205,20 +224,16 @@ class Controller(Cell):
return func.keepdims_prod(pbc_box, -1)
def get_pressure(self, kinetics: Tensor, virial: Tensor, pbc_box: Tensor) -> Tensor:
- r"""Calculate pressure according to kinetics, viral and PBC box
+ r"""
+        Calculate pressure according to kinetics, virial and PBC box.
Args:
- kinetics (Tensor): Tensor of kinetics.
- The shape of the Tensor is `(B, D)`, and the data type is float.
- virial (Tensor): Tensor of virial.
- The shape of the Tensor is `(B, D)`, and the data type is float.
- pbc_box (Tensor): Tensor of PBC box.
- The shape of the Tensor is `(B, D)`, and the data type is float.
+ kinetics(Tensor): Tensor of kinetics. Tensor shape is `(B, D)`. Data type is float.
+ virial(Tensor): Tensor of virial. Tensor shape is `(B, D)`. Data type is float.
+ pbc_box(Tensor): Tensor of PBC box. Tensor shape is `(B, D)`. Data type is float.
Returns:
- pressure (Tensor): Tensor of pressure.
- The shape of the Tensor is `(B, D)`, and the data type is float.
-
+ Tensor, Tensor of pressure. Tensor shape is `(B, D)`. Data type is float.
"""
if self._pbc_box is None:
return None
@@ -228,18 +243,17 @@ class Controller(Cell):
return pressure * self.press_unit_scale
def get_com(self, coordinate: Tensor, keepdims: bool = True) -> Tensor:
- r"""Get coordinate of center of mass
+ r"""
+ Get coordinate of center of mass.
Args:
- coordinate (Tensor): Tensor of atomic coordinates.
- The shape of the Tensor is `(B, A, D)`, and the data type is float.
- keepdims (bool): If this is set to `True`, the second axis will be left
- in the result as dimensions with size one. Default: True
+ coordinate(Tensor): Tensor of atomic coordinates. Tensor shape is `(B, A, D)`. Data type is float.
+ keepdims(bool): If this is set to `True`, the second axis will be left
+ in the result as dimensions with size one. Default: True
Returns:
- com (Tensor): Tensor of the center of mass.
- The shape of the Tensor is `(B, A, D)` or `(B, D)`, and the data type is float.
-
+ Tensor, Tensor of the coordinate of the center of mass. Tensor shape is `(B, A, D)` or `(B, D)`.
+ Data type is float.
"""
# (B, A, D) = (B, A, D) * (B, A, 1)
@@ -262,16 +276,17 @@ class Controller(Cell):
return com
def get_com_velocity(self, velocity: Tensor, keepdims: bool = True) -> Tensor:
- r"""calculate velocity of center of mass
+ r"""
+ Calculate velocity of center of mass.
+
Args:
- coordinate (Tensor): Tensor of atomic coordinates.
- The shape of the Tensor is `(B, A, D)`, and the data type is float.
- keepdims (bool): If this is set to `True`, the second axis will be left
- in the result as dimensions with size one. Default: True
+ velocity(Tensor): Tensor of velocity. Tensor shape is `(B, A, D)`. Data type is float.
+ keepdims(bool): If this is set to `True`, the second axis will be left
+ in the result as dimensions with size one. Default: True
Returns:
- com_vel (Tensor): Tensor of the velocity of the center of mass.
- The shape of the Tensor is `(B, A, D)` or `(B, D)`, and the data type is float.
+ Tensor, Tensor of the velocity of the center of mass.
+ Tensor shape is `(B, A, D)` or `(B, D)`. Data type is float.
"""
diff --git a/MindSPONGE/mindsponge/python/control/integrator/brownian.py b/MindSPONGE/mindsponge/python/control/integrator/brownian.py
index 0edc78a444199c6f9e4081f1138d73753c1e33d8..e64b0731caa299ec910835acc60f996a6a57ec10 100644
--- a/MindSPONGE/mindsponge/python/control/integrator/brownian.py
+++ b/MindSPONGE/mindsponge/python/control/integrator/brownian.py
@@ -34,6 +34,7 @@ from mindspore.ops import functional as F
from .integrator import Integrator
from ...system import Molecule
+from ...function import get_arguments
class Brownian(Integrator):
@@ -57,6 +58,7 @@ class Brownian(Integrator):
system: Molecule,
temperature: float = 300,
friction_coefficient: float = 1e3,
+ **kwargs,
):
super().__init__(
@@ -65,6 +67,7 @@ class Brownian(Integrator):
barostat=None,
constraint=None,
)
+ self._kwargs = get_arguments(locals(), kwargs)
self.ref_temp = Tensor(temperature, ms.float32)
diff --git a/MindSPONGE/mindsponge/python/control/integrator/integrator.py b/MindSPONGE/mindsponge/python/control/integrator/integrator.py
index 57276c7d48dfe7f2884d56c9c9416d650cebe681..1ca789ca6597a89471a247e9c996fe9b74ca0418 100644
--- a/MindSPONGE/mindsponge/python/control/integrator/integrator.py
+++ b/MindSPONGE/mindsponge/python/control/integrator/integrator.py
@@ -35,7 +35,7 @@ from ..thermostat import Thermostat
from ..barostat import Barostat
from ..constraint import Constraint
from ...system import Molecule
-from ...function.functions import get_integer
+from ...function import get_integer, get_arguments
class Integrator(Controller):
@@ -66,12 +66,14 @@ class Integrator(Controller):
thermostat: Thermostat = None,
barostat: Barostat = None,
constraint: Union[Constraint, List[Constraint]] = None,
+ **kwargs
):
super().__init__(
system=system,
control_step=1,
)
+ self._kwargs = get_arguments(locals(), kwargs)
self.acc_unit_scale = Tensor(self.units.acceleration_ref, ms.float32)
@@ -156,10 +158,9 @@ class Integrator(Controller):
new_name = self.get_name(constraint)
print(f'Change the constraint from "{old_name} to "{new_name}".')
- if constraint is None:
- self.constraint = None
- self.num_constraint_controller = 0
- else:
+ self.constraint: List[Constraint] = None
+ self.num_constraint_controller = 0
+ if constraint is not None:
if isinstance(constraint, Controller):
self.num_constraint_controller = 1
constraint = [constraint]
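The rewritten branch above initialises `constraint` and `num_constraint_controller` before testing the argument, so `None`, a single controller and a list are all handled on the same path. The helper below is a hypothetical plain-Python mirror of that normalisation, a sketch rather than the MindSPONGE implementation; `Constraint` here is a stand-in class.

# Illustrative sketch, not part of the patch: normalising a constraint argument that may be
# None, a single controller or a list, as the branch above does for Integrator.
from typing import List, Optional, Tuple, Union

class Constraint:
    """Hypothetical stand-in for the Constraint controller."""

def normalize_constraint(constraint: Union[Constraint, List[Constraint], None]
                         ) -> Tuple[Optional[List[Constraint]], int]:
    constraint_list: Optional[List[Constraint]] = None
    num_constraint_controller = 0
    if constraint is not None:
        if isinstance(constraint, Constraint):   # single controller: wrap it in a list
            constraint = [constraint]
        constraint_list = list(constraint)
        num_constraint_controller = len(constraint_list)
    return constraint_list, num_constraint_controller

print(normalize_constraint(None))                              # (None, 0)
print(normalize_constraint(Constraint())[1])                   # 1
print(normalize_constraint([Constraint(), Constraint()])[1])   # 2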
diff --git a/MindSPONGE/mindsponge/python/control/integrator/leapfrog.py b/MindSPONGE/mindsponge/python/control/integrator/leapfrog.py
index 8365f673a93fdad1bbf07a3d35032e9619859920..da8720535342651347d996e1f59bee6f04a64db3 100644
--- a/MindSPONGE/mindsponge/python/control/integrator/leapfrog.py
+++ b/MindSPONGE/mindsponge/python/control/integrator/leapfrog.py
@@ -33,6 +33,7 @@ from ..thermostat import Thermostat
from ..barostat import Barostat
from ..constraint import Constraint
from ...system import Molecule
+from ...function import get_arguments
class LeapFrog(Integrator):
@@ -65,6 +66,7 @@ class LeapFrog(Integrator):
thermostat: Thermostat = None,
barostat: Barostat = None,
constraint: Constraint = None,
+ **kwargs
):
super().__init__(
@@ -73,6 +75,7 @@ class LeapFrog(Integrator):
barostat=barostat,
constraint=constraint,
)
+ self._kwargs = get_arguments(locals(), kwargs)
def construct(self,
coordinate: Tensor,
diff --git a/MindSPONGE/mindsponge/python/control/integrator/velocityverlet.py b/MindSPONGE/mindsponge/python/control/integrator/velocityverlet.py
index 24ee7b29fc38ce739f828297ff8670c7bd6a05db..758cc0d0c6d621f8c6d3d149defa4665d6af55c7 100644
--- a/MindSPONGE/mindsponge/python/control/integrator/velocityverlet.py
+++ b/MindSPONGE/mindsponge/python/control/integrator/velocityverlet.py
@@ -35,6 +35,7 @@ from ..thermostat import Thermostat
from ..barostat import Barostat
from ..constraint import Constraint
from ...system import Molecule
+from ...function import get_arguments
class VelocityVerlet(Integrator):
@@ -70,6 +71,7 @@ class VelocityVerlet(Integrator):
thermostat: Thermostat = None,
barostat: Barostat = None,
constraint: Constraint = None,
+ **kwargs,
):
super().__init__(
@@ -78,6 +80,7 @@ class VelocityVerlet(Integrator):
barostat=barostat,
constraint=constraint,
)
+ self._kwargs = get_arguments(locals(), kwargs)
# v(t+0.5) = v(t) + 0.5 * a(t) * dt
velocity_half = msnp.zeros_like(self.system.coordinate)
diff --git a/MindSPONGE/mindsponge/python/control/thermostat/berendsen.py b/MindSPONGE/mindsponge/python/control/thermostat/berendsen.py
index ea062e351595be4a99309975754cbab125f2a446..663e0aca8679c7476cf7d238b630ea53a326c73c 100644
--- a/MindSPONGE/mindsponge/python/control/thermostat/berendsen.py
+++ b/MindSPONGE/mindsponge/python/control/thermostat/berendsen.py
@@ -29,6 +29,7 @@ from mindspore import ops
from . import Thermostat
from ...system import Molecule
+from ...function import get_arguments
class BerendsenThermostat(Thermostat):
@@ -69,6 +70,7 @@ class BerendsenThermostat(Thermostat):
time_constant: float = 0.2,
scale_min: float = 0.8,
scale_max: float = 1.25,
+ **kwargs,
):
super().__init__(
@@ -77,6 +79,7 @@ class BerendsenThermostat(Thermostat):
control_step=control_step,
time_constant=time_constant,
)
+ self._kwargs = get_arguments(locals(), kwargs)
self.scale_min = scale_min
self.scale_max = scale_max
diff --git a/MindSPONGE/mindsponge/python/control/thermostat/langevin.py b/MindSPONGE/mindsponge/python/control/thermostat/langevin.py
index e90d19f0f82686503ae36d1ab774f13f9274893e..02c62106e448b7176716d63368439a12dfb473fe 100644
--- a/MindSPONGE/mindsponge/python/control/thermostat/langevin.py
+++ b/MindSPONGE/mindsponge/python/control/thermostat/langevin.py
@@ -31,6 +31,7 @@ from mindspore.ops import functional as F
from .thermostat import Thermostat
from ...system import Molecule
+from ...function import get_arguments
class Langevin(Thermostat):
@@ -72,6 +73,7 @@ class Langevin(Thermostat):
time_constant: float = 0.1,
seed: int = 0,
seed2: int = 0,
+ **kwargs,
):
super().__init__(
@@ -80,6 +82,7 @@ class Langevin(Thermostat):
control_step=control_step,
time_constant=time_constant,
)
+ self._kwargs = get_arguments(locals(), kwargs)
# (B,A,1)
self._inv_sqrt_mass = F.sqrt(self._inv_mass)
diff --git a/MindSPONGE/mindsponge/python/control/thermostat/thermostat.py b/MindSPONGE/mindsponge/python/control/thermostat/thermostat.py
index 9c68666dfb424501f4d4f63f15587a1e33d4b7ff..6a34baeecba2eb27dea66433c959f43478bd6257 100644
--- a/MindSPONGE/mindsponge/python/control/thermostat/thermostat.py
+++ b/MindSPONGE/mindsponge/python/control/thermostat/thermostat.py
@@ -32,6 +32,7 @@ from mindspore.ops import functional as F
from .. import Controller
from ...system import Molecule
+from ...function import get_arguments
from ...function import functions as func
@@ -63,12 +64,14 @@ class Thermostat(Controller):
temperature: float = 300,
control_step: int = 1,
time_constant: float = 0.5,
+ **kwargs,
):
super().__init__(
system=system,
control_step=control_step,
)
+ self._kwargs = get_arguments(locals(), kwargs)
self.ref_temp = func.get_ms_array(temperature, ms.float32).reshape(-1, 1)
self.ref_kinetics = 0.5 * self.degrees_of_freedom * self.boltzmann * self.ref_temp
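Every controller in this patch now accepts `**kwargs` and stores `get_arguments(locals(), kwargs)`, recording the constructor arguments so the object can later be rebuilt or serialised. The helper below, `collect_init_arguments`, is a hypothetical stand-in with the same call shape, shown only to illustrate the pattern; the real `get_arguments` is the one imported from `...function` above.

# Illustrative sketch, not part of the patch: what a get_arguments-style helper could do
# (collect_init_arguments is a hypothetical stand-in, not the MindSPONGE implementation).
def collect_init_arguments(locals_: dict, kwargs: dict = None) -> dict:
    """Merge the named __init__ arguments with any extra keyword arguments."""
    args = {k: v for k, v in locals_.items()
            if k not in ('self', '__class__', 'kwargs')}
    if kwargs:
        args.update(kwargs)
    return args

class DemoThermostat:
    """Hypothetical controller that records its constructor arguments like the classes above."""
    def __init__(self, temperature: float = 300, control_step: int = 1, **kwargs):
        self._kwargs = collect_init_arguments(locals(), kwargs)

demo = DemoThermostat(temperature=310, tag='bath-A')
print(demo._kwargs)   # {'temperature': 310, 'control_step': 1, 'tag': 'bath-A'}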
diff --git a/MindSPONGE/mindsponge/python/core/simulation/energy.py b/MindSPONGE/mindsponge/python/core/simulation/energy.py
index d37027d0faca41296b8896524f820e9b5ba14c97..7580839efa03eb7617388f13feac791d49c2b1d5 100644
--- a/MindSPONGE/mindsponge/python/core/simulation/energy.py
+++ b/MindSPONGE/mindsponge/python/core/simulation/energy.py
@@ -33,7 +33,7 @@ from mindspore import ops
from mindspore.ops import functional as F
from mindspore.nn import Cell, CellList
-from ...function.units import Units
+from ...function import Units, get_arguments
from ...partition import NeighbourList
from ...system import Molecule
from ...potential import PotentialCell
@@ -42,44 +42,36 @@ from ...sampling.wrapper import EnergyWrapper
class WithEnergyCell(Cell):
- r"""Cell that wraps the simulation system with the potential energy function.
-
- This Cell calculates the value of the potential energy of the system at the current coordinates and returns it.
+ r"""
+ Cell that wraps the simulation system with the potential energy function.
+ This Cell calculates the value of the potential energy of the system at the current coordinates and returns it.
Args:
-
- system (Molecule): Simulation system.
-
- potential (PotentialCell): Potential energy function cell.
-
- bias (Union[Bias, List[Bias]]): Bias potential function cell. Default: None
-
- cutoff (float): Cut-off distance for neighbour list. If None is given, it will be assigned
+ system(Molecule): Simulation system.
+ potential(PotentialCell): Potential energy function cell.
+ bias(Union[Bias, List[Bias]]): Bias potential function cell. Default: None
+ cutoff(float): Cut-off distance for neighbour list. If None is given, it will be assigned
as the cutoff value of the potential energy.
Default: None
+ neighbour_list(NeighbourList): Neighbour list. Default: None
+ wrapper(EnergyWrapper): Network to wrap and process potential and bias.
+ Default: None
- neighbour_list (NeighbourList): Neighbour list. Default: None
+ Inputs:
+    - **\*inputs** (Tuple(Tensor)) - Tuple of input tensors of `WithEnergyCell`.
- wrapper (EnergyWrapper): Network to wrap and process potential and bias.
- Default: None
+ Outputs:
+ energy, Tensor of shape `(B, 1)`. Data type is float. Total potential energy.
Supported Platforms:
-
``Ascend`` ``GPU``
-
Symbols:
-
B: Batchsize, i.e. number of walkers of the simulation.
-
A: Number of the atoms in the simulation system.
-
N: Number of the maximum neighbouring atoms.
-
U: Number of potential energy terms.
-
V: Number of bias potential terms.
-
"""
def __init__(self,
@@ -89,9 +81,11 @@ class WithEnergyCell(Cell):
cutoff: float = None,
neighbour_list: NeighbourList = None,
wrapper: EnergyWrapper = None,
+ **kwargs
):
super().__init__(auto_prefix=False)
+ self._kwargs = get_arguments(locals(), kwargs)
self.system = system
self.potential_function = potential
@@ -99,7 +93,7 @@ class WithEnergyCell(Cell):
self.units = Units(self.system.length_unit, self.potential_function.energy_unit)
self.system.units.set_energy_unit(self.energy_unit)
- self.bias_function = None
+ self.bias_function: List[Bias] = None
self._num_biases = 0
self._bias_names = []
if bias is not None:
@@ -180,101 +174,101 @@ class WithEnergyCell(Cell):
@property
def cutoff(self) -> Tensor:
- r"""cutoff distance for neighbour list
+ r"""
+ Cutoff distance for neighbour list.
Return:
- Tensor, cutoff
-
+ Tensor, cutoff distance.
"""
return self.neighbour_list.cutoff
@property
def neighbour_list_pace(self) -> int:
- r"""update step for neighbour list
+ r"""
+ Update step for neighbour list.
Return:
- int, steps
-
+ int, update steps.
"""
return self.neighbour_list.pace
@property
def length_unit(self) -> str:
- r"""length unit
+ r"""
+ Length unit.
Return:
- str, length unit
-
+ str, length unit.
"""
return self.units.length_unit
@property
def energy_unit(self) -> str:
- r"""energy unit
+ r"""
+ Energy unit.
Return:
- str, energy unit
-
+ str, energy unit.
"""
return self.units.energy_unit
@property
def num_energies(self) -> int:
- r"""number of energy terms :math:`U`
+ r"""
+ Number of energy terms :math:`U`.
Return:
- int, number of energy terms
-
+ int, number of energy terms.
"""
return self.potential_function.num_energies
@property
def num_biases(self) -> int:
- r"""number of bias potential energies :math:`V`
+ r"""
+ Number of bias potential energies :math:`V`.
Return:
- int, number of bias potential energies
-
+ int, number of bias potential energies.
"""
return self._num_biases
@property
def energy_names(self) -> list:
- r"""names of energy terms
+ r"""
+ Names of energy terms.
Return:
- list of str, names of energy terms
-
+ list[str], names of energy terms.
"""
return self.potential_function.energy_names
@property
def bias_names(self) -> list:
- r"""name of bias potential energies
+ r"""
+        Names of bias potential energies.
Return:
- list of str, the bias potential energies
-
+ list[str], the bias potential energies.
"""
return self._bias_names
@property
def energies(self) -> Tensor:
- r"""Tensor of potential energy components.
+ r"""
+ Tensor of potential energy components.
Return:
- energies(Tensor): Tensor of shape `(B, U)`. Data type is float.
-
+ Tensor, Tensor of shape `(B, U)`. Data type is float.
"""
return self.identity(self._energies)
@property
def biases(self) -> Tensor:
- r"""Tensor of bias potential components.
+ r"""
+ Tensor of bias potential components.
Return:
- biases(Tensor): Tensor of shape `(B, V)`. Data type is float.
-
+ Tensor, Tensor of shape `(B, V)`. Data type is float.
"""
if self.bias_function is None:
return None
@@ -282,59 +276,54 @@ class WithEnergyCell(Cell):
@property
def bias(self) -> Tensor:
- r"""Tensor of the total bias potential.
+ r"""
+ Tensor of the total bias potential.
Return:
- bias(Tensor): Tensor of shape `(B, 1)`. Data type is float.
-
+ Tensor, Tensor of shape `(B, 1)`. Data type is float.
"""
return self.identity(self._bias)
def bias_pace(self, index: int = 0) -> int:
- """return the update freqenucy for bias potential
+ """
+        Return the update frequency for the bias potential.
Args:
- index (int): Index of bias potential
+ index(int): Index of bias potential. Default: 0
Returns:
- update_pace (int): Update freqenucy
-
+        int, update frequency.
"""
return self.bias_function[index].update_pace
def set_pbc_grad(self, grad_box: bool):
- r"""set whether to calculate the gradient of PBC box
+ r"""
+ Set whether to calculate the gradient of PBC box.
Args:
- grad_box (bool): Whether to calculate the gradient of PBC box.
-
+ grad_box(bool): Whether to calculate the gradient of PBC box.
"""
self.system.set_pbc_grad(grad_box)
return self
def update_neighbour_list(self) -> Tuple[Tensor, Tensor]:
- r"""update neighbour list
-
- Args:
- coordinate (Tensor): Tensor of shape `(B, A, D)`. Data type is float.
- Position coordinate.
- pbc_box (Tensor): Tensor of shape `(B, D)`. Data type is float.
- Size of PBC box.
+ r"""
+ Update neighbour list.
Returns:
- neigh_idx (Tensor): Tensor of shape `(B, A, N)`. Data type is int.
- Index of neighbouring atoms of each atoms in system.
- neigh_mask (Tensor): Tensor of shape `(B, A, N)`. Data type is bool.
- Mask for neighbour list `neigh_idx`.
+ - neigh_idx, Tensor. Tensor of shape `(B, A, N)`. Data type is int.
+ Index of neighbouring atoms of each atoms in system.
+ - neigh_mask, Tensor. Tensor of shape `(B, A, N)`. Data type is bool.
+ Mask for neighbour list `neigh_idx`.
"""
return self.neighbour_list.update(self.coordinate, self.pbc_box)
def update_bias(self, step: int):
- r"""update bias potential
+ r"""
+ Update bias potential.
Args:
- step (int): Simulatio step.
-
+            step(int): Current simulation step. If it is divisible by the update frequency, the bias potential will be updated.
"""
if self.bias_function is not None:
for i in range(self._num_biases):
@@ -343,60 +332,59 @@ class WithEnergyCell(Cell):
return self
def update_wrapper(self, step: int):
- r"""update energy wrapper
+ r"""
+ Update energy wrapper.
Args:
- step (int): Simulatio step.
-
+            step(int): Current simulation step. If it is divisible by the update frequency, the energy wrapper will be updated.
"""
if self.wrapper_pace > 0 and step % self.wrapper_pace == 0:
self.energy_wrapper.update()
return self
def get_neighbour_list(self) -> Tuple[Tensor, Tensor]:
- r"""get neighbour list
+ r"""
+ Get neighbour list.
Returns:
- neigh_idx (Tensor): Tensor of shape `(B, A, N)`. Data type is int.
- Index of neighbouring atoms of each atoms in system.
- neigh_mask (Tensor): Tensor of shape `(B, A, N)`. Data type is bool.
- Mask for neighbour list `neigh_idx`.
+ - neigh_idx, Tensor. Tensor of shape `(B, A, N)`. Data type is int.
+ Index of neighbouring atoms of each atoms in system.
+ - neigh_mask, Tensor. Tensor of shape `(B, A, N)`. Data type is bool.
+ Mask for neighbour list `neigh_idx`.
Symbols:
B: Batchsize, i.e. number of walkers of the simulation.
A: Number of the atoms in the simulation system.
N: Number of the maximum neighbouring atoms.
-
"""
return self.neighbour_list.get_neighbour_list()
def calc_energies(self) -> Tensor:
- """calculate the energy terms of the potential energy.
+ """
+ Calculate the energy terms of the potential energy.
Return:
- energies (Tensor): Tensor of shape `(B, U)`. Data type is float.
- Energy terms.
+ Tensor, Tensor of shape `(B, U)`. Data type is float. Energy terms.
Symbols:
B: Batchsize, i.e. number of walkers of the simulation.
U: Number of potential energy terms.
-
"""
- neigh_idx, neigh_pos, neigh_dis, neigh_mask = self.neighbour_list(self.coordinate, self.pbc_box)
+ neigh_idx, neigh_vec, neigh_dis, neigh_mask = self.neighbour_list(self.coordinate, self.pbc_box)
coordinate = self.coordinate * self.length_unit_scale
pbc_box = self.pbc_box
if pbc_box is not None:
pbc_box *= self.length_unit_scale
- neigh_pos *= self.length_unit_scale
+ neigh_vec *= self.length_unit_scale
neigh_dis *= self.length_unit_scale
energies = self.potential_function(
coordinate=coordinate,
neighbour_index=neigh_idx,
neighbour_mask=neigh_mask,
- neighbour_coord=neigh_pos,
+ neighbour_vector=neigh_vec,
neighbour_distance=neigh_dis,
pbc_box=pbc_box
)
@@ -404,27 +392,26 @@ class WithEnergyCell(Cell):
return energies
def calc_biases(self) -> Tensor:
- """calculate the bias potential terms.
+ """
+ Calculate the bias potential terms.
Return:
- biases (Tensor): Tensor of shape `(B, V)`. Data type is float.
- Energy terms.
+ Tensor, Tensor of shape `(B, V)`. Data type is float. Bias potential terms.
Symbols:
B: Batchsize, i.e. number of walkers of the simulation.
V: Number of bias potential terms.
-
"""
if self.bias_function is None:
return None
- neigh_idx, neigh_pos, neigh_dis, neigh_mask = self.neighbour_list(self.coordinate, self.pbc_box)
+ neigh_idx, neigh_vec, neigh_dis, neigh_mask = self.neighbour_list(self.coordinate, self.pbc_box)
coordinate = self.coordinate * self.length_unit_scale
pbc_box = self.pbc_box
if pbc_box is not None:
pbc_box *= self.length_unit_scale
- neigh_pos *= self.length_unit_scale
+ neigh_vec *= self.length_unit_scale
neigh_dis *= self.length_unit_scale
biases = ()
@@ -433,7 +420,7 @@ class WithEnergyCell(Cell):
coordinate=coordinate,
neighbour_index=neigh_idx,
neighbour_mask=neigh_mask,
- neighbour_coord=neigh_pos,
+ neighbour_vector=neigh_vec,
neighbour_distance=neigh_dis,
pbc_box=pbc_box
)
@@ -455,10 +442,10 @@ class WithEnergyCell(Cell):
#pylint: disable=unused-argument
coordinate, pbc_box = self.system()
- neigh_idx, neigh_pos, neigh_dis, neigh_mask = self.neighbour_list(coordinate, pbc_box)
+ neigh_idx, neigh_vec, neigh_dis, neigh_mask = self.neighbour_list(coordinate, pbc_box)
coordinate *= self.length_unit_scale
- neigh_pos *= self.length_unit_scale
+ neigh_vec *= self.length_unit_scale
neigh_dis *= self.length_unit_scale
if pbc_box is not None:
pbc_box *= self.length_unit_scale
@@ -467,7 +454,7 @@ class WithEnergyCell(Cell):
coordinate=coordinate,
neighbour_index=neigh_idx,
neighbour_mask=neigh_mask,
- neighbour_coord=neigh_pos,
+ neighbour_vector=neigh_vec,
neighbour_distance=neigh_dis,
pbc_box=pbc_box
)
@@ -482,7 +469,7 @@ class WithEnergyCell(Cell):
coordinate=coordinate,
neighbour_index=neigh_idx,
neighbour_mask=neigh_mask,
- neighbour_coord=neigh_pos,
+ neighbour_vector=neigh_vec,
neighbour_distance=neigh_dis,
pbc_box=pbc_box
)
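With this change the potential and bias cells receive the scaled inter-atomic vectors under the keyword `neighbour_vector` rather than `neighbour_coord`. The toy NumPy function below is a hypothetical stand-in for such a term (not the real `PotentialCell` API), showing the renamed keyword and the array shapes from the docstring symbols.

# Illustrative sketch, not part of the patch: a toy pair term that accepts the renamed
# `neighbour_vector` keyword and only uses the neighbour distances.
import numpy as np

def demo_pair_energy(coordinate, neighbour_index=None, neighbour_mask=None,
                     neighbour_vector=None, neighbour_distance=None, pbc_box=None):
    """Sum of 1/r over the neighbour list, returned with shape (B, 1)."""
    inv_r = np.where(neighbour_mask, 1.0 / neighbour_distance, 0.0)
    return inv_r.sum(axis=(-1, -2)).reshape(-1, 1)

# Shapes follow the docstring symbols: B walkers, A atoms, N neighbours, D dimensions.
B, A, N, D = 1, 4, 2, 3
energy = demo_pair_energy(
    coordinate=np.zeros((B, A, D)),
    neighbour_index=np.zeros((B, A, N), dtype=int),
    neighbour_mask=np.ones((B, A, N), dtype=bool),
    neighbour_vector=np.ones((B, A, N, D)),   # assumed shape for the neighbour vectors
    neighbour_distance=np.full((B, A, N), 0.3),
    pbc_box=None,
)
print(energy.shape)   # (1, 1)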
diff --git a/MindSPONGE/mindsponge/python/core/simulation/force.py b/MindSPONGE/mindsponge/python/core/simulation/force.py
index b5310caf8be59424722bd618475a8da9810f2bc6..b1075934160d9a13aa3410a739cbc0cc2c3b106f 100644
--- a/MindSPONGE/mindsponge/python/core/simulation/force.py
+++ b/MindSPONGE/mindsponge/python/core/simulation/force.py
@@ -160,6 +160,8 @@ class WithForceCell(Cell):
Tensor, cutoff
"""
+ if self.neighbour_list is None:
+ return None
return self.neighbour_list.cutoff
@property
@@ -170,6 +172,8 @@ class WithForceCell(Cell):
int, step
"""
+ if self.neighbour_list is None:
+ return 0
return self.neighbour_list.pace
@property
@@ -284,13 +288,13 @@ class WithForceCell(Cell):
if pbc_box is not None:
pbc_box *= self.length_unit_scale
- neigh_idx, neigh_pos, neigh_dis, neigh_mask = self.neighbour_list(coordinate, pbc_box)
+ neigh_idx, neigh_vec, neigh_dis, neigh_mask = self.neighbour_list(coordinate, pbc_box)
energy, force, virial = self.force_function(
coordinate=coordinate,
neighbour_index=neigh_idx,
neighbour_mask=neigh_mask,
- neighbour_coord=neigh_pos,
+ neighbour_vector=neigh_vec,
neighbour_distance=neigh_dis,
pbc_box=pbc_box
)
diff --git a/MindSPONGE/mindsponge/python/core/simulation/run.py b/MindSPONGE/mindsponge/python/core/simulation/run.py
index 5adcbeb10b040fa41f7e192c8587e1438ba8c03c..f3fedc2d2ea70dfa7169d3705cc21238460c0f08 100644
--- a/MindSPONGE/mindsponge/python/core/simulation/run.py
+++ b/MindSPONGE/mindsponge/python/core/simulation/run.py
@@ -39,45 +39,42 @@ from mindspore.nn.optim import Optimizer
from .energy import WithEnergyCell
from .force import WithForceCell
-from ...function.functions import get_integer, all_none
+from ...function.functions import get_integer, all_none, get_arguments
from ...optimizer import Updater
class RunOneStepCell(Cell):
- r"""Cell to run one step simulation.
-
- This Cell wraps the `energy` and `force` with the `optimizer`. The backward graph will be created
- in the construct function to update the atomic coordinates of the simulation system.
+ r"""
+ Cell to run one step simulation.
+ This Cell wraps the `energy` and `force` with the `optimizer`. The backward graph will be created
+ in the construct function to update the atomic coordinates of the simulation system.
Args:
-
- energy (WithEnergyCell): Cell that wraps the simulation system with
- the potential energy function.
- Defatul: None
-
- force (WithForceCell): Cell that wraps the simulation system with
- the atomic force function.
- Defatul: None
-
- optimizer (Optimizer): Optimizer for simulation. Defatul: None
-
- steps (int): Steps for JIT. Default: 1
-
- sens (float): The scaling number to be filled as the input of backpropagation.
- Default: 1.0
+        energy(WithEnergyCell): Cell that wraps the simulation system with
+            the potential energy function.
+            Default: None
+        force(WithForceCell): Cell that wraps the simulation system with
+            the atomic force function.
+            Default: None
+        optimizer(Optimizer): Optimizer for simulation. Default: None
+ steps(int): Steps for JIT. Default: 1
+ sens(float): The scaling number to be filled as the input of backpropagation.
+ Default: 1.0
+
+ Inputs:
+ - **\*inputs** (Tuple(Tensor)) - Tuple of input tensors of `WithEnergyCell`.
+
+ Outputs:
+ - energy, Tensor of shape `(B, 1)`. Data type is float. Total potential energy.
+ - force, Tensor of shape `(B, A, D)`. Data type is float. Atomic force.
Supported Platforms:
-
``Ascend`` ``GPU``
Symbols:
-
B: Batchsize, i.e. number of walkers of the simulation.
-
A: Number of the atoms in the simulation system.
-
D: Spatial dimension of the simulation system. Usually is 3.
-
"""
def __init__(self,
energy: WithEnergyCell = None,
@@ -85,9 +82,11 @@ class RunOneStepCell(Cell):
optimizer: Optimizer = None,
steps: int = 1,
sens: float = 1.0,
+ **kwargs
):
super().__init__(auto_prefix=False)
+ self._kwargs = get_arguments(locals(), kwargs)
if all_none([energy, force]):
raise ValueError('energy and force cannot be both None!')
@@ -141,193 +140,203 @@ class RunOneStepCell(Cell):
self.steps = get_integer(steps)
@property
- def neighbour_list_pace(self) -> int:
- r"""update step for neighbour list
+ def neighbour_list_pace(self):
+ r"""
+        Update step for neighbour list.
Return:
- int, steps
-
+ int, the number of steps needed for neighbour list updating.
"""
return self._neighbour_list_pace
@property
- def energy_cutoff(self) -> Tensor:
- r"""cutoff distance for neighbour list in WithEnergyCell
+ def energy_cutoff(self):
+ r"""
+        Cutoff distance for neighbour list in WithEnergyCell.
Return:
- Tensor, cutoff
-
+ Tensor, cutoff distance for neighbour list in WithEnergyCell.
"""
if self.system_with_energy is None:
return None
return self.system_with_energy.cutoff
@property
- def force_cutoff(self) -> Tensor:
- r"""cutoff distance for neighbour list in WithForceCell
+ def force_cutoff(self):
+ r"""
+        Cutoff distance for neighbour list in WithForceCell.
Return:
- Tensor, cutoff
-
+ Tensor, cutoff distance for neighbour list in WithForceCell.
"""
if self.system_with_force is None:
return None
return self.system_with_force.cutoff
@property
- def length_unit(self) -> str:
- r"""length unit
+ def length_unit(self):
+ r"""
+        Length unit.
Return:
- str, length unit
-
+ str, length unit.
"""
return self.units.length_unit
@property
- def energy_unit(self) -> str:
- r"""energy unit
+ def energy_unit(self):
+ r"""
+        Energy unit.
Return:
- str, energy unit
-
+ str, energy unit.
"""
return self.units.energy_unit
@property
- def num_energies(self) -> int:
- r"""number of energy terms :math:`U`
+ def num_energies(self):
+ r"""
+        Number of energy terms :math:`U`.
Return:
- int, number of energy terms
-
+ int, number of energy terms.
"""
if self.system_with_energy is None:
return 0
return self.system_with_energy.num_energies
@property
- def energy_names(self) -> list:
- r"""names of energy terms
+ def energy_names(self):
+ r"""
+        Names of energy terms.
Return:
- list of str, names of energy terms
-
+ list[str], names of energy terms.
"""
if self.system_with_energy is None:
return []
return self.system_with_energy.energy_names
@property
- def bias_names(self) -> list:
- r"""name of bias potential energies
+ def bias_names(self):
+ r"""
+        Names of bias potential energies.
Return:
- list of str, the bias potential energies
-
+ list[str], the bias potential energies.
"""
if self.system_with_energy is None:
return []
return self.system_with_energy.bias_names
@property
- def num_biases(self) -> int:
- r"""number of bias potential energies :math:`V`
+ def num_biases(self):
+ r"""
+        Number of bias potential energies :math:`V`.
Return:
- int, number of bias potential energies
-
+ int, number of bias potential energies.
"""
if self.system_with_energy is None:
return 0
return self.system_with_energy.num_biases
@property
- def energies(self) -> Tensor:
- r"""Tensor of potential energy components.
+ def energies(self):
+ r"""
+ Tensor of potential energy components.
Return:
- energies(Tensor): Tensor of shape `(B, U)`. Data type is float.
-
+ Tensor, Tensor of shape `(B, U)`. Data type is float.
"""
if self.system_with_energy is None:
return None
return self.system_with_energy.energies
@property
- def biases(self) -> Tensor:
- r"""Tensor of bias potential components.
+ def biases(self):
+ r"""
+ Tensor of bias potential components.
Return:
- biases(Tensor): Tensor of shape `(B, V)`. Data type is float.
-
+ Tensor, Tensor of shape `(B, V)`. Data type is float.
"""
if self.system_with_energy is None:
return None
return self.system_with_energy.biases
@property
- def bias(self) -> Tensor:
- r"""Tensor of the total bias potential.
+ def bias(self):
+ r"""
+ Tensor of the total bias potential.
Return:
- bias(Tensor): Tensor of shape `(B, 1)`. Data type is float.
-
+ Tensor, Tensor of shape `(B, 1)`. Data type is float.
"""
if self.system_with_energy is None:
return None
return self.system_with_energy.bias
@property
- def bias_function(self) -> Cell:
- r"""Cell of bias potential function"""
+ def bias_function(self):
+ r"""
+ Cell of bias potential function.
+
+ Return:
+ Cell, bias potential function.
+ """
if self.system_with_energy is None:
return None
return self.system_with_energy.bias_function
def update_neighbour_list(self):
- r"""update neighbour list"""
+        r"""Update neighbour list."""
if self.system_with_energy is not None:
self.system_with_energy.update_neighbour_list()
- if self.system_with_force is not None:
+ if self.system_with_force is not None and self.system_with_force.neighbour_list is not None:
self.system_with_force.update_neighbour_list()
return self
def update_bias(self, step: int):
- r"""update bias potential
+ r"""
+        Update bias potential.
Args:
- step (int): Simulatio step.
-
+ step(int): Simulation step to update bias potential.
"""
if self.system_with_energy is not None:
self.system_with_energy.update_bias(step)
return self
def update_wrapper(self, step: int):
- r"""update energy wrapper
+ r"""
+        Update energy wrapper.
Args:
- step (int): Simulatio step.
-
+ step(int): Simulation step to update energy wrapper.
"""
if self.system_with_energy is not None:
self.system_with_energy.update_wrapper(step)
return self
def update_modifier(self, step: int):
- r"""update force modifier
+ r"""
+        Update force modifier.
Args:
- step (int): Simulatio step.
-
+ step(int): Simulation step to update force modifier.
"""
if self.system_with_force is not None:
self.system_with_force.update_modifier(step)
return self
def set_pbc_grad(self, value: bool):
- r"""set whether to calculate the gradient of PBC box"""
+ r"""
+        Set whether to calculate the gradient of PBC box.
+
+ Args:
+ value(bool): Flag to judge whether to calculate the gradient of PBC box.
+ """
if self.system_with_energy is not None:
self.system_with_energy.set_pbc_grad(value)
if self.system_with_force is not None:
@@ -335,29 +344,31 @@ class RunOneStepCell(Cell):
return self
def set_steps(self, steps: int):
- r"""set steps for JIT
+ r"""
+        Set steps for JIT.
Args:
- step (int): Simulatio step.
-
+            steps(int): Simulation steps for JIT.
"""
self.steps = get_integer(steps)
return self
@jit
def run_one_step(self, *inputs):
- r"""run one step simulation
+ r"""
+ Run one step simulation.
+
+ Args:
+ *inputs(Tuple(Tensor)): Tuple of input tensors of `WithEnergyCell`.
Returns:
- energy (Tensor): Tensor of shape `(B, 1)`. Data type is float.
- Total potential energy.
- force (Tensor): Tensor of shape `(B, A, D)`. Data type is float.
- Atomic force.
+ - energy, Tensor of shape `(B, 1)`. Data type is float. Total potential energy.
+ - force, Tensor of shape `(B, A, D)`. Data type is float. Atomic force.
+
Symbols:
B: Batchsize, i.e. number of walkers of the simulation.
A: Number of the atoms in the simulation system.
D: Spatial dimension of the simulation system. Usually is 3.
-
"""
energy = 0
force = 0
@@ -385,19 +396,20 @@ class RunOneStepCell(Cell):
return energy, force
def construct(self, *inputs) -> Tuple[Tensor, Tensor]:
- r"""run simulation
+ r"""
+ Run simulation.
+
+ Args:
+            *inputs(Tuple(Tensor)): Tuple of input tensors of `WithEnergyCell`.
Returns:
- energy (Tensor): Tensor of shape `(B, 1)`. Data type is float.
- Total potential energy.
- force (Tensor): Tensor of shape `(B, A, D)`. Data type is float.
- Atomic force.
+ - energy, Tensor of shape `(B, 1)`. Data type is float. Total potential energy.
+ - force, Tensor of shape `(B, A, D)`. Data type is float. Atomic force.
Symbols:
B: Batchsize, i.e. number of walkers of the simulation.
A: Number of the atoms in the simulation system.
D: Spatial dimension of the simulation system. Usually is 3.
-
"""
if self.steps == 1:
return self.run_one_step(*inputs)
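The `steps` argument lets several simulation steps run inside one JIT-compiled call, with `construct` falling back to `run_one_step` when `steps == 1`, as above. Below is a plain-Python sketch of that dispatch; the multi-step loop is an assumption, since the `else` branch of `construct` is not shown in this hunk.

# Illustrative sketch, not part of the patch: dispatching between a single step and a fused
# multi-step call, mirroring RunOneStepCell.construct / set_steps above.
def make_runner(run_one_step, steps: int = 1):
    def construct(*inputs):
        if steps == 1:
            return run_one_step(*inputs)
        energy = force = None
        for _ in range(steps):           # assumed: repeat the inner step `steps` times
            energy, force = run_one_step(*inputs)
        return energy, force
    return construct

runner = make_runner(lambda: (0.0, 0.0), steps=4)
print(runner())   # (0.0, 0.0)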
diff --git a/MindSPONGE/mindsponge/python/core/sponge.py b/MindSPONGE/mindsponge/python/core/sponge.py
index b18e63cc65d09195e1c688e7c3f6aadd3454d78f..c7e5374f00d6d39cb6d2802f12c4eb63f3fb9089 100644
--- a/MindSPONGE/mindsponge/python/core/sponge.py
+++ b/MindSPONGE/mindsponge/python/core/sponge.py
@@ -50,7 +50,7 @@ from mindspore.dataset.engine.datasets import _set_training_dataset
from .simulation import WithEnergyCell, WithForceCell
from .simulation import RunOneStepCell
from .analysis import AnalysisCell
-from ..function import any_not_none
+from ..function import any_not_none, get_arguments
from ..potential import PotentialCell, ForceCell
from ..optimizer import Updater, UpdaterMD
from ..system.molecule import Molecule
@@ -150,7 +150,9 @@ class Sponge():
optimizer: Optimizer = None,
metrics: dict = None,
analysis: AnalysisCell = None,
+ **kwargs
):
+ self._kwargs = get_arguments(locals(), kwargs)
self._parallel_mode = _get_parallel_mode()
self._device_number = _get_device_num()
@@ -210,7 +212,16 @@ class Sponge():
self.units = self._system.units
- self.time_step = self._optimizer.learning_rate.asnumpy()
+ lr = self._optimizer.learning_rate
+ if self._optimizer.dynamic_lr:
+ if self._optimizer.is_group_lr:
+ lr = ()
+ for learning_rate in self._optimizer.learning_rate:
+ current_dynamic_lr = learning_rate(0)
+ lr += (current_dynamic_lr,)
+ else:
+ lr = self._optimizer.learning_rate(0)
+ self.time_step = lr.asnumpy()
self.coordinate = self._system.coordinate
self.pbc_box = self._system.pbc_box
@@ -335,7 +346,16 @@ class Sponge():
energy=self._system_with_energy, optimizer=self._optimizer)
self._simulation_network.set_pbc_grad(self.use_updater)
- self.time_step = self._optimizer.learning_rate.asnumpy()
+ lr = self._optimizer.learning_rate
+ if self._optimizer.dynamic_lr:
+ if self._optimizer.is_group_lr:
+ lr = ()
+ for learning_rate in self._optimizer.learning_rate:
+ current_dynamic_lr = learning_rate(0)
+ lr += (current_dynamic_lr,)
+ else:
+ lr = self._optimizer.learning_rate(0)
+ self.time_step = lr.asnumpy()
return self
@@ -523,11 +543,11 @@ class Sponge():
s = used_time.seconds
m, s = divmod(s, 60)
h, m = divmod(m, 60)
- if d > 1:
+ if d >= 1:
print('[MindSPONGE] Simulation time: %d days, %d hours, %d minutes and %d seconds.' % (d, h, m, s))
- elif h > 1:
+ elif h >= 1:
print('[MindSPONGE] Simulation time: %d hours %d minutes %d seconds.' % (h, m, s))
- elif m > 1:
+ elif m >= 1:
s += used_time.microseconds / 1e6
print('[MindSPONGE] Simulation time: %d minutes %1.1f seconds.' % (m, s))
else:
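The comparisons above change from `> 1` to `>= 1` so that a run of exactly one day, one hour or one minute is reported by the matching branch instead of falling through to a coarser one. The standalone sketch below reproduces the corrected formatting; the wording of the final seconds-only branch is assumed, since it lies outside this hunk.

# Illustrative sketch, not part of the patch: the elapsed-time report with the corrected
# `>= 1` thresholds used in Sponge above.
from datetime import timedelta

def format_used_time(used_time: timedelta) -> str:
    d = used_time.days
    s = used_time.seconds
    m, s = divmod(s, 60)
    h, m = divmod(m, 60)
    if d >= 1:
        return '%d days, %d hours, %d minutes and %d seconds.' % (d, h, m, s)
    if h >= 1:
        return '%d hours %d minutes %d seconds.' % (h, m, s)
    if m >= 1:
        s += used_time.microseconds / 1e6
        return '%d minutes %1.1f seconds.' % (m, s)
    s += used_time.microseconds / 1e6
    return '%1.2f seconds.' % s          # assumed wording for the sub-minute branch

# Exactly one hour now stays in the hours branch instead of dropping to the minutes one.
print(format_used_time(timedelta(hours=1, seconds=30)))   # '1 hours 0 minutes 30 seconds.'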
diff --git a/MindSPONGE/mindsponge/python/data/__init__.py b/MindSPONGE/mindsponge/python/data/__init__.py
index f9dfe28f7a0c4c5c31f6f35e223cb7488811bb1e..2421467f571cc4156278a86a6befdc88914fca5e 100644
--- a/MindSPONGE/mindsponge/python/data/__init__.py
+++ b/MindSPONGE/mindsponge/python/data/__init__.py
@@ -22,25 +22,22 @@
# ============================================================================
"""Data"""
-from .data import (get_bonded_types, get_dihedral_types, get_improper_types,
- read_yaml, update_dict, write_yaml)
-from .data_transform import atom37_to_frames, atom37_to_torsion_angles
-from .elements import (atomic_mass, element_dict, element_name, element_set,
- elements)
-from .forcefield import get_forcefield
-from .hyperparam import (get_class_parameters, get_hyper_parameter,
- get_hyper_string, load_checkpoint,
- load_hyper_param_into_class, load_hyperparam,
- set_class_into_hyper_param, set_class_parameters,
- set_hyper_parameter, str_to_tensor, tensor_to_str)
+from .element import elements, element_dict, element_name, element_set, atomic_mass
+from .hyperparam import str_to_tensor, tensor_to_str
+from .hyperparam import get_class_parameters, get_hyper_parameter, get_hyper_string
+from .hyperparam import set_class_parameters, set_hyper_parameter, set_class_into_hyper_param
+from .hyperparam import load_hyperparam, load_hyper_param_into_class
+from .template import get_template, get_template_index, get_molecule
from .parameters import ForceFieldParameters
-from .template import get_molecule, get_template, get_template_index
+from .forcefield import get_forcefield
+from .data import read_yaml, write_yaml, update_dict
+from .data import get_bonded_types, get_dihedral_types, get_improper_types
+from .data_transform import atom37_to_frames, atom37_to_torsion_angles
+
-__all__ = ['elements', 'element_dict', 'element_name', 'element_set', 'atomic_mass',
- 'str_to_tensor', 'tensor_to_str', 'get_class_parameters', 'get_hyper_parameter',
- 'get_hyper_string', 'set_class_parameters', 'set_hyper_parameter',
- 'set_class_into_hyper_param', 'load_checkpoint', 'load_hyperparam',
- 'load_hyper_param_into_class', 'get_template', 'get_template_index',
- 'get_molecule', 'ForceFieldParameters', 'get_forcefield', 'read_yaml',
- 'write_yaml', 'update_dict', 'get_bonded_types', 'get_dihedral_types',
- 'get_improper_types', 'atom37_to_frames', 'atom37_to_torsion_angles']
+__all__ = ['ForceFieldParameters', 'get_forcefield',
+ 'atom37_to_frames', 'atom37_to_torsion_angles']
+__all__.extend(element.__all__)
+__all__.extend(hyperparam.__all__)
+__all__.extend(template.__all__)
+__all__.extend(data.__all__)
diff --git a/MindSPONGE/mindsponge/python/data/data.py b/MindSPONGE/mindsponge/python/data/data.py
index b6dfb198776d9366054b63fbfb134901880c48c6..505c0f098273d44400947bf230571e1d9511e9f9 100644
--- a/MindSPONGE/mindsponge/python/data/data.py
+++ b/MindSPONGE/mindsponge/python/data/data.py
@@ -24,11 +24,27 @@
Base function for yaml
"""
+import os
from itertools import permutations
import yaml
import numpy as np
from numpy import ndarray
+from mindspore.train._utils import _make_directory
+
+
+_cur_dir = os.getcwd()
+
+
+__all__ = [
+ 'update_dict',
+ 'read_yaml',
+ 'write_yaml',
+ 'get_bonded_types',
+ 'get_dihedral_types',
+ 'get_improper_types',
+]
+
def update_dict(origin: dict, addition: dict = None) -> dict:
"""
@@ -57,19 +73,27 @@ def update_dict(origin: dict, addition: dict = None) -> dict:
return dictionary
-def write_yaml(filename: str, data: dict):
+def write_yaml(data: dict, filename: str, directory: str = None):
"""
write YAML file.
Args:
- filename(str): Name of YAML file.
data(dict): Dict for output.
+        filename(str): Name of YAML file.
+        directory(str): Directory where the YAML file will be written. If None is given,
+            the current working directory will be used. Default: None
+
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
+ if directory is None:
+ directory = _cur_dir
+ else:
+ directory = _make_directory(directory)
+
+ filename = os.path.join(directory, filename)
+
with open(filename, 'w', encoding="utf-8") as file:
yaml.dump(data, file, sort_keys=False)
@@ -93,12 +117,12 @@ def read_yaml(filename: str) -> dict:
return data
-def get_bonded_types(atom_types: ndarray, symbol: str = '-'):
+def get_bonded_types(atom_type: ndarray, symbol: str = '-'):
"""
get the types of bonded terms including bond, angle and dihedral.
Args:
- atom_types(ndarray): types of atoms.
+ atom_type(ndarray): types of atoms.
symbol(str): Symbol used to concatenate the atom types. Default: '-'
Returns:
@@ -107,25 +131,25 @@ def get_bonded_types(atom_types: ndarray, symbol: str = '-'):
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
- num_atoms = atom_types.shape[-1]
+ num_atoms = atom_type.shape[-1]
if num_atoms == 1:
- return atom_types
+ return atom_type
- types = atom_types[..., 0]
+ types = atom_type[..., 0]
for i in range(1, num_atoms):
types = np.char.add(types, symbol)
- types = np.char.add(types, atom_types[..., i])
+ types = np.char.add(types, atom_type[..., i])
return types
-def get_dihedral_types(atom_types: ndarray, symbol: str = '-'):
+def get_dihedral_types(atom_type: ndarray, symbol: str = '-'):
"""
The multi atom name constructor.
Args:
- atom_types(ndarray): types of atoms.
+ atom_type(ndarray): types of atoms.
symbol(str): Symbol used to concatenate the atom types. Default: '-'
Returns:
@@ -135,30 +159,30 @@ def get_dihedral_types(atom_types: ndarray, symbol: str = '-'):
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
- num_atoms = atom_types.shape[-1]
+ num_atoms = atom_type.shape[-1]
if num_atoms == 1:
- return atom_types
+ return atom_type
- types = atom_types[..., 0]
+ types = atom_type[..., 0]
for i in range(1, num_atoms):
types = np.char.add(types, symbol)
- types = np.char.add(types, atom_types[..., i])
+ types = np.char.add(types, atom_type[..., i])
- inverse_types = atom_types[..., -1]
+ inverse_types = atom_type[..., -1]
for i in range(1, num_atoms):
inverse_types = np.char.add(inverse_types, symbol)
- inverse_types = np.char.add(inverse_types, atom_types[..., -1-i])
+ inverse_types = np.char.add(inverse_types, atom_type[..., -1-i])
return types, inverse_types
-def get_improper_types(atom_types: ndarray, symbol: str = '-'):
+def get_improper_types(atom_type: ndarray, symbol: str = '-'):
"""
The multi atom name constructor.
Args:
- atom_types(ndarray): types of atoms.
+ atom_type(ndarray): types of atoms.
symbol(str): Symbol used to concatenate the atom types. Default: '-'
Returns:
@@ -168,18 +192,18 @@ def get_improper_types(atom_types: ndarray, symbol: str = '-'):
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
"""
- num_atoms = atom_types.shape[-1]
+ num_atoms = atom_type.shape[-1]
if num_atoms == 1:
- return atom_types
+ return atom_type
permuation_types = ()
orders = ()
for combination in permutations(range(num_atoms)):
- types = atom_types[..., combination[0]]
+ types = atom_type[..., combination[0]]
for i in range(1, num_atoms):
types = np.char.add(types, symbol)
- types = np.char.add(types, atom_types[..., combination[i]])
+ types = np.char.add(types, atom_type[..., combination[i]])
permuation_types += (types,)
orders += (combination,)
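For reference, the renamed `atom_type` arrays are joined along their last axis with `symbol`, and `get_dihedral_types` additionally builds the reversed spelling. The small NumPy example below reproduces that behaviour (reimplemented here rather than imported, so it runs standalone).

# Illustrative sketch, not part of the patch: what get_bonded_types and get_dihedral_types
# produce for small atom_type arrays.
import numpy as np

bonds = np.array([['CT', 'HC'],
                  ['N',  'H']])
symbol = '-'

types = bonds[..., 0]
for i in range(1, bonds.shape[-1]):
    types = np.char.add(types, symbol)
    types = np.char.add(types, bonds[..., i])
print(types)             # ['CT-HC' 'N-H']

dihedrals = np.array([['N', 'CT', 'C', 'O']])
forward = dihedrals[..., 0]
inverse = dihedrals[..., -1]
for i in range(1, dihedrals.shape[-1]):
    forward = np.char.add(np.char.add(forward, symbol), dihedrals[..., i])
    inverse = np.char.add(np.char.add(inverse, symbol), dihedrals[..., -1 - i])
print(forward, inverse)  # ['N-CT-C-O'] ['O-C-CT-N']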
diff --git a/MindSPONGE/mindsponge/python/data/data_transform.py b/MindSPONGE/mindsponge/python/data/data_transform.py
index b6366c3281a7abb73dce7cb64ff920056e87b87f..2efad5e77c2756eaf5e4ba7d80a33c936a46d1e6 100644
--- a/MindSPONGE/mindsponge/python/data/data_transform.py
+++ b/MindSPONGE/mindsponge/python/data/data_transform.py
@@ -592,7 +592,7 @@ def atom37_to_torsion_angles(
all_atom_mask (numpy.array): Atom37 representation of the mask on all atomic coordinates with
shape :math:`(batch\_size, N_{res})`.
alt_torsions (bool): Indicates whether to set the sign angle of shielding torsion to zero.
- Default: Fal``se.
+ Default: ``False``.
is_multimer (bool): It will be True when multimer is used. Default: ``False``.
Returns:
diff --git a/MindSPONGE/mindsponge/python/data/elements.py b/MindSPONGE/mindsponge/python/data/element.py
similarity index 98%
rename from MindSPONGE/mindsponge/python/data/elements.py
rename to MindSPONGE/mindsponge/python/data/element.py
index 199f1ad480bd3ab94cbce511c736e2f142696a85..641ca84c6f0391fb7cc84b357698f4c42f1104a7 100644
--- a/MindSPONGE/mindsponge/python/data/elements.py
+++ b/MindSPONGE/mindsponge/python/data/element.py
@@ -27,6 +27,16 @@ Information of chemical elements
import numpy as np
+
+__all__ = [
+ 'elements',
+ 'element_dict',
+ 'element_name',
+ 'element_set',
+ 'atomic_mass',
+]
+
+
elements = np.array([
'',
'H',
@@ -149,8 +159,10 @@ elements = np.array([
'Og',
])
+
element_set = set(elements)
+
element_dict = {
'X': 0,
'': 0,
@@ -274,6 +286,7 @@ element_dict = {
'Og': 118,
}
+
element_name = np.array([
'None',
'Hydrogen',
@@ -396,6 +409,7 @@ element_name = np.array([
'Oganesson',
])
+
atomic_mass = np.array([
0.000,
1.008,
diff --git a/MindSPONGE/mindsponge/python/data/forcefield/amber.ff99sb.yaml b/MindSPONGE/mindsponge/python/data/forcefield/amber.ff99sb.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3589bd48f55f66198b258efbc7d8d609ac1236dd
--- /dev/null
+++ b/MindSPONGE/mindsponge/python/data/forcefield/amber.ff99sb.yaml
@@ -0,0 +1,1113 @@
+template:
+ base: protein0.yaml
+ ALA:
+ atom_type: [N, H, CT, H1, CT, HC, HC, HC, C, O]
+ atom_charge: [-0.4157, 0.2719, 0.0337, 0.0823, -0.1825, 0.0603, 0.0603, 0.0603, 0.5973, -0.5679]
+ ARG:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CT, HC, HC, CT, H1, H1, N2, H, CA, N2, H, H, N2, H, H, C, O]
+ atom_charge: [-0.3479, 0.2747, -0.2637, 0.156, -0.0007, 0.0327, 0.0327, 0.039, 0.0285, 0.0285, 0.0486, 0.0687,
+ 0.0687, -0.5295, 0.3456, 0.8076, -0.8627, 0.4478, 0.4478, -0.8627, 0.4478, 0.4478, 0.7341, -0.5894]
+ ASN:
+ atom_type: [N, H, CT, H1, CT, HC, HC, C, O, N, H, H, C, O]
+ atom_charge: [-0.4157, 0.2719, 0.0143, 0.1048, -0.2041, 0.0797, 0.0797, 0.713, -0.5931, -0.9191, 0.4196, 0.4196,
+ 0.5973, -0.5679]
+ ASP:
+ atom_type: [N, H, CT, H1, CT, HC, HC, C, O2, O2, C, O]
+ atom_charge: [-0.5163, 0.2936, 0.0381, 0.088, -0.0303, -0.0122, -0.0122, 0.7994, -0.8014, -0.8014, 0.5366, -0.5819]
+ CYS:
+ atom_type: [N, H, CT, H1, CT, H1, H1, SH, HS, C, O]
+ atom_charge: [-0.4157, 0.2719, 0.0213, 0.1124, -0.1231, 0.1112, 0.1112, -0.3119, 0.1933, 0.5973, -0.5679]
+ GLN:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CT, HC, HC, C, O, N, H, H, C, O]
+ atom_charge: [-0.4157, 0.2719, -0.0031, 0.085, -0.0036, 0.0171, 0.0171, -0.0645, 0.0352, 0.0352, 0.6951, -0.6086,
+ -0.9407, 0.4251, 0.4251, 0.5973, -0.5679]
+ GLU:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CT, HC, HC, C, O2, O2, C, O]
+ atom_charge: [-0.5163, 0.2936, 0.0397, 0.1105, 0.056, -0.0173, -0.0173, 0.0136, -0.0425, -0.0425, 0.8054, -0.8188,
+ -0.8188, 0.5366, -0.5819]
+ GLY:
+ atom_type: [N, H, CT, H1, H1, C, O]
+ atom_charge: [-0.4157, 0.2719, -0.0252, 0.0698, 0.0698, 0.5973, -0.5679]
+ HID:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CC, NA, H, CR, H5, NB, CV, H4, C, O]
+ atom_charge: [-0.4157, 0.2719, 0.0188, 0.0881, -0.0462, 0.0402, 0.0402, -0.0266, -0.3811, 0.3649, 0.2057, 0.1392,
+ -0.5727, 0.1292, 0.1147, 0.5973, -0.5679]
+ HIS:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CC, NB, CR, H5, NA, H, CW, H4, C, O]
+ atom_charge: [-0.4157, 0.2719, -0.0581, 0.136, -0.0074, 0.0367, 0.0367, 0.1868, -0.5432, 0.1635, 0.1435, -0.2795,
+ 0.3339, -0.2207, 0.1862, 0.5973, -0.5679]
+ ILE:
+ atom_type: [N, H, CT, H1, CT, HC, CT, HC, HC, HC, CT, HC, HC, CT, HC, HC, HC, C, O]
+ atom_charge: [-0.4157, 0.2719, -0.0597, 0.0869, 0.1303, 0.0187, -0.3204, 0.0882, 0.0882, 0.0882, -0.043, 0.0236,
+ 0.0236, -0.066, 0.0186, 0.0186, 0.0186, 0.5973, -0.5679]
+ LEU:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CT, HC, CT, HC, HC, HC, CT, HC, HC, HC, C, O]
+ atom_charge: [-0.4157, 0.2719, -0.0518, 0.0922, -0.1102, 0.0457, 0.0457, 0.3531, -0.0361, -0.4121, 0.1, 0.1, 0.1,
+ -0.4121, 0.1, 0.1, 0.1, 0.5973, -0.5679]
+ LYS:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CT, HC, HC, CT, HC, HC, CT, HP, HP, N3, H, H, H, C, O]
+ atom_charge: [-0.3479, 0.2747, -0.24, 0.1426, -0.0094, 0.0362, 0.0362, 0.0187, 0.0103, 0.0103, -0.0479, 0.0621,
+ 0.0621, -0.0143, 0.1135, 0.1135, -0.3854, 0.34, 0.34, 0.34, 0.7341, -0.5894]
+ MET:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CT, H1, H1, S, CT, H1, H1, H1, C, O]
+ atom_charge: [-0.4157, 0.2719, -0.0237, 0.088, 0.0342, 0.0241, 0.0241, 0.0018, 0.044, 0.044, -0.2737, -0.0536,
+ 0.0684, 0.0684, 0.0684, 0.5973, -0.5679]
+ PHE:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CA, CA, HA, CA, HA, CA, HA, CA, HA, CA, HA, C, O]
+ atom_charge: [-0.4157, 0.2719, -0.0024, 0.0978, -0.0343, 0.0295, 0.0295, 0.0118, -0.1256, 0.133, -0.1704, 0.143,
+ -0.1072, 0.1297, -0.1704, 0.143, -0.1256, 0.133, 0.5973, -0.5679]
+ PRO:
+ atom_type: [N, CT, H1, H1, CT, HC, HC, CT, HC, HC, CT, H1, C, O]
+ atom_charge: [-0.2548, 0.0192, 0.0391, 0.0391, 0.0189, 0.0213, 0.0213, -0.007, 0.0253, 0.0253, -0.0266, 0.0641,
+ 0.5896, -0.5748]
+ SER:
+ atom_type: [N, H, CT, H1, CT, H1, H1, OH, HO, C, O]
+ atom_charge: [-0.4157, 0.2719, -0.0249, 0.0843, 0.2117, 0.0352, 0.0352, -0.6546, 0.4275, 0.5973, -0.5679]
+ THR:
+ atom_type: [N, H, CT, H1, CT, H1, CT, HC, HC, HC, OH, HO, C, O]
+ atom_charge: [-0.4157, 0.2719, -0.0389, 0.1007, 0.3654, 0.0043, -0.2438, 0.0642, 0.0642, 0.0642, -0.6761, 0.4102,
+ 0.5973, -0.5679]
+ TRP:
+ atom_type: [N, H, CT, H1, CT, HC, HC, C*, CW, H4, NA, H, CN, CA, HA, CA, HA, CA, HA, CA, HA, CB, C, O]
+ atom_charge: [-0.4157, 0.2719, -0.0275, 0.1123, -0.005, 0.0339, 0.0339, -0.1415, -0.1638, 0.2062, -0.3418, 0.3412,
+ 0.138, -0.2601, 0.1572, -0.1134, 0.1417, -0.1972, 0.1447, -0.2387, 0.17, 0.1243, 0.5973, -0.5679]
+ TYR:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CA, CA, HA, CA, HA, C, OH, HO, CA, HA, CA, HA, C, O]
+ atom_charge: [-0.4157, 0.2719, -0.0014, 0.0876, -0.0152, 0.0295, 0.0295, -0.0011, -0.1906, 0.1699, -0.2341, 0.1656,
+ 0.3226, -0.5579, 0.3992, -0.2341, 0.1656, -0.1906, 0.1699, 0.5973, -0.5679]
+ VAL:
+ atom_type: [N, H, CT, H1, CT, HC, CT, HC, HC, HC, CT, HC, HC, HC, C, O]
+ atom_charge: [-0.4157, 0.2719, -0.0875, 0.0969, 0.2985, -0.0297, -0.3192, 0.0791, 0.0791, 0.0791, -0.3192, 0.0791,
+ 0.0791, 0.0791, 0.5973, -0.5679]
+ NALA:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, HC, C, O]
+ atom_charge: [0.1414, 0.1997, 0.1997, 0.1997, 0.0962, 0.0889, -0.0597, 0.03, 0.03, 0.03, 0.6163, -0.5722]
+ NARG:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, CT, HC, HC, CT, H1, H1, N2, H, CA, N2, H, H, N2, H, H, C, O]
+ atom_charge: [0.1305, 0.2083, 0.2083, 0.2083, -0.0223, 0.1242, 0.0118, 0.0226, 0.0226, 0.0236, 0.0309, 0.0309,
+ 0.0935, 0.0527, 0.0527, -0.565, 0.3592, 0.8281, -0.8693, 0.4494, 0.4494, -0.8693, 0.4494, 0.4494,
+ 0.7214, -0.6013]
+ NASN:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, C, O, N, H, H, C, O]
+ atom_charge: [0.1801, 0.1921, 0.1921, 0.1921, 0.0368, 0.1231, -0.0283, 0.0515, 0.0515, 0.5833, -0.5744, -0.8634,
+ 0.4097, 0.4097, 0.6163, -0.5722]
+ NASP:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, C, O2, O2, C, O]
+ atom_charge: [0.0782, 0.22, 0.22, 0.22, 0.0292, 0.1141, -0.0235, -0.0169, -0.0169, 0.8194, -0.8084, -0.8084, 0.5621,
+ -0.5889]
+ NCYS:
+ atom_type: [N3, H, H, H, CT, HP, CT, H1, H1, SH, HS, C, O]
+ atom_charge: [0.1325, 0.2023, 0.2023, 0.2023, 0.0927, 0.1411, -0.1195, 0.1188, 0.1188, -0.3298, 0.1975, 0.6123,
+ -0.5713]
+ NGLN:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, CT, HC, HC, C, O, N, H, H, C, O]
+ atom_charge: [0.1493, 0.1996, 0.1996, 0.1996, 0.0536, 0.1015, 0.0651, 0.005, 0.005, -0.0903, 0.0331, 0.0331, 0.7354,
+ -0.6133, -1.0031, 0.4429, 0.4429, 0.6123, -0.5713]
+ NGLU:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, CT, HC, HC, C, O2, O2, C, O]
+ atom_charge: [0.0017, 0.2391, 0.2391, 0.2391, 0.0588, 0.1202, 0.0909, -0.0232, -0.0232, -0.0236, -0.0315, -0.0315,
+ 0.8087, -0.8189, -0.8189, 0.5621, -0.5889]
+ NGLY:
+ atom_type: [N3, H, H, H, CT, HP, HP, C, O]
+ atom_charge: [0.2943, 0.1642, 0.1642, 0.1642, -0.01, 0.0895, 0.0895, 0.6163, -0.5722]
+ NHID:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, CC, NA, H, CR, H5, NB, CV, H4, C, O]
+ atom_charge: [0.1542, 0.1963, 0.1963, 0.1963, 0.0964, 0.0958, 0.0259, 0.0209, 0.0209, -0.0399, -0.3819, 0.3632,
+ 0.2127, 0.1385, -0.5711, 0.1046, 0.1299, 0.6123, -0.5713]
+ NHIS:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, CC, NB, CR, H5, NA, H, CW, H4, C, O]
+ atom_charge: [0.1472, 0.2016, 0.2016, 0.2016, 0.0236, 0.138, 0.0489, 0.0223, 0.0223, 0.174, -0.5579, 0.1804, 0.1397,
+ -0.2781, 0.3324, -0.2349, 0.1963, 0.6123, -0.5713]
+ NILE:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, CT, HC, HC, HC, CT, HC, HC, CT, HC, HC, HC, C, O]
+ atom_charge: [0.0311, 0.2329, 0.2329, 0.2329, 0.0257, 0.1031, 0.1885, 0.0213, -0.372, 0.0947, 0.0947, 0.0947,
+ -0.0387, 0.0201, 0.0201, -0.0908, 0.0226, 0.0226, 0.0226, 0.6123, -0.5713]
+ NLEU:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, CT, HC, CT, HC, HC, HC, CT, HC, HC, HC, C, O]
+ atom_charge: [0.101, 0.2148, 0.2148, 0.2148, 0.0104, 0.1053, -0.0244, 0.0256, 0.0256, 0.3421, -0.038, -0.4106,
+ 0.098, 0.098, 0.098, -0.4104, 0.098, 0.098, 0.098, 0.6123, -0.5713]
+ NLYS:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, CT, HC, HC, CT, HC, HC, CT, HP, HP, N3, H, H, H, C, O]
+ atom_charge: [0.0966, 0.2165, 0.2165, 0.2165, -0.0015, 0.118, 0.0212, 0.0283, 0.0283, -0.0048, 0.0121, 0.0121,
+ -0.0608, 0.0633, 0.0633, -0.0181, 0.1171, 0.1171, -0.3764, 0.3382, 0.3382, 0.3382, 0.7214, -0.6013]
+ NMET:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, CT, H1, H1, S, CT, H1, H1, H1, C, O]
+ atom_charge: [0.1592, 0.1984, 0.1984, 0.1984, 0.0221, 0.1116, 0.0865, 0.0125, 0.0125, 0.0334, 0.0292, 0.0292,
+ -0.2774, -0.0341, 0.0597, 0.0597, 0.0597, 0.6123, -0.5713]
+ NPHE:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, CA, CA, HA, CA, HA, CA, HA, CA, HA, CA, HA, C, O]
+ atom_charge: [0.1737, 0.1921, 0.1921, 0.1921, 0.0733, 0.1041, 0.033, 0.0104, 0.0104, 0.0031, -0.1392, 0.1374,
+ -0.1602, 0.1433, -0.1208, 0.1329, -0.1603, 0.1433, -0.1391, 0.1374, 0.6123, -0.5713]
+ NPRO:
+ atom_type: [N3, H, H, CT, HP, HP, CT, HC, HC, CT, HC, HC, CT, HP, C, O]
+ atom_charge: [-0.202, 0.312, 0.312, -0.012, 0.1, 0.1, -0.121, 0.1, 0.1, -0.115, 0.1, 0.1, 0.1, 0.1, 0.526, -0.5]
+ NSER:
+ atom_type: [N3, H, H, H, CT, HP, CT, H1, H1, OH, HO, C, O]
+ atom_charge: [0.1849, 0.1898, 0.1898, 0.1898, 0.0567, 0.0782, 0.2596, 0.0273, 0.0273, -0.6714, 0.4239, 0.6163,
+ -0.5722]
+ NTHR:
+ atom_type: [N3, H, H, H, CT, HP, CT, H1, CT, HC, HC, HC, OH, HO, C, O]
+ atom_charge: [0.1812, 0.1934, 0.1934, 0.1934, 0.0034, 0.1087, 0.4514, -0.0323, -0.2554, 0.0627, 0.0627, 0.0627,
+ -0.6764, 0.407, 0.6163, -0.5722]
+ NTRP:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, C*, CW, H4, NA, H, CN, CA, HA, CA, HA, CA, HA, CA, HA, CB, C, O]
+ atom_charge: [0.1913, 0.1888, 0.1888, 0.1888, 0.0421, 0.1162, 0.0543, 0.0222, 0.0222, -0.1654, -0.1788, 0.2195,
+ -0.3444, 0.3412, 0.1575, -0.271, 0.1589, -0.108, 0.1411, -0.2034, 0.1458, -0.2265, 0.1646, 0.1132,
+ 0.6123, -0.5713]
+ NTYR:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, HC, CA, CA, HA, CA, HA, C, OH, HO, CA, HA, CA, HA, C, O]
+ atom_charge: [0.194, 0.1873, 0.1873, 0.1873, 0.057, 0.0983, 0.0659, 0.0102, 0.0102, -0.0205, -0.2002, 0.172,
+ -0.2239, 0.165, 0.3139, -0.5578, 0.4001, -0.2239, 0.165, -0.2002, 0.172, 0.6123, -0.5713]
+ NVAL:
+ atom_type: [N3, H, H, H, CT, HP, CT, HC, CT, HC, HC, HC, CT, HC, HC, HC, C, O]
+ atom_charge: [0.0577, 0.2272, 0.2272, 0.2272, -0.0054, 0.1093, 0.3196, -0.0221, -0.3129, 0.0735, 0.0735, 0.0735,
+ -0.3129, 0.0735, 0.0735, 0.0735, 0.6163, -0.5722]
+ CALA:
+ atom_type: [N, H, CT, H1, CT, HC, HC, HC, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.1747, 0.1067, -0.2093, 0.0764, 0.0764, 0.0764, 0.7731, -0.8055, -0.8055]
+ CARG:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CT, HC, HC, CT, H1, H1, N2, H, CA, N2, H, H, N2, H, H, C, O2, O2]
+ atom_charge: [-0.3481, 0.2764, -0.3068, 0.1447, -0.0374, 0.0371, 0.0371, 0.0744, 0.0185, 0.0185, 0.1114, 0.0468,
+ 0.0468, -0.5564, 0.3479, 0.8368, -0.8737, 0.4493, 0.4493, -0.8737, 0.4493, 0.4493, 0.8557, -0.8266,
+ -0.8266]
+ CASN:
+ atom_type: [N, H, CT, H1, CT, HC, HC, C, O, N, H, H, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.208, 0.1358, -0.2299, 0.1023, 0.1023, 0.7153, -0.601, -0.9084, 0.415, 0.415,
+ 0.805, -0.8147, -0.8147]
+ CASP:
+ atom_type: [N, H, CT, H1, CT, HC, HC, C, O2, O2, C, O2, O2]
+ atom_charge: [-0.5192, 0.3055, -0.1817, 0.1046, -0.0677, -0.0212, -0.0212, 0.8851, -0.8162, -0.8162, 0.7256,
+ -0.7887, -0.7887]
+ CCYS:
+ atom_type: [N, H, CT, H1, CT, H1, H1, SH, HS, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.1635, 0.1396, -0.1996, 0.1437, 0.1437, -0.3102, 0.2068, 0.7497, -0.7981, -0.7981]
+ CGLN:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CT, HC, HC, C, O, N, H, H, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.2248, 0.1232, -0.0664, 0.0452, 0.0452, -0.021, 0.0203, 0.0203, 0.7093, -0.6098,
+ -0.9574, 0.4304, 0.4304, 0.7775, -0.8042, -0.8042]
+ CGLU:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CT, HC, HC, C, O2, O2, C, O2, O2]
+ atom_charge: [-0.5192, 0.3055, -0.2059, 0.1399, 0.0071, -0.0078, -0.0078, 0.0675, -0.0548, -0.0548, 0.8183, -0.822,
+ -0.822, 0.742, -0.793, -0.793]
+ CGLY:
+ atom_type: [N, H, CT, H1, H1, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.2493, 0.1056, 0.1056, 0.7231, -0.7855, -0.7855]
+ CHID:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CC, NA, H, CR, H5, NB, CV, H4, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.1739, 0.11, -0.1046, 0.0565, 0.0565, 0.0293, -0.3892, 0.3755, 0.1925, 0.1418,
+ -0.5629, 0.1001, 0.1241, 0.7615, -0.8016, -0.8016]
+ CHIS:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CC, NB, CR, H5, NA, H, CW, H4, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.2699, 0.165, -0.1068, 0.062, 0.062, 0.2724, -0.5517, 0.1558, 0.1448, -0.267,
+ 0.3319, -0.2588, 0.1957, 0.7916, -0.8065, -0.8065]
+ CILE:
+ atom_type: [N, H, CT, H1, CT, HC, CT, HC, HC, HC, CT, HC, HC, CT, HC, HC, HC, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.31, 0.1375, 0.0363, 0.0766, -0.3498, 0.1021, 0.1021, 0.1021, -0.0323, 0.0321,
+ 0.0321, -0.0699, 0.0196, 0.0196, 0.0196, 0.8343, -0.819, -0.819]
+ CLEU:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CT, HC, CT, HC, HC, HC, CT, HC, HC, HC, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.2847, 0.1346, -0.2469, 0.0974, 0.0974, 0.3706, -0.0374, -0.4163, 0.1038, 0.1038,
+ 0.1038, -0.4163, 0.1038, 0.1038, 0.1038, 0.8326, -0.8199, -0.8199]
+ CLYS:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CT, HC, HC, CT, HC, HC, CT, HP, HP, N3, H, H, H, C, O2, O2]
+ atom_charge: [-0.3481, 0.2764, -0.2903, 0.1438, -0.0538, 0.0482, 0.0482, 0.0227, 0.0134, 0.0134, -0.0392, 0.0611,
+ 0.0611, -0.0176, 0.1121, 0.1121, -0.3741, 0.3374, 0.3374, 0.3374, 0.8488, -0.8252, -0.8252]
+ CMET:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CT, H1, H1, S, CT, H1, H1, H1, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.2597, 0.1277, -0.0236, 0.048, 0.048, 0.0492, 0.0317, 0.0317, -0.2692, -0.0376,
+ 0.0625, 0.0625, 0.0625, 0.8013, -0.8105, -0.8105]
+ CPHE:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CA, CA, HA, CA, HA, CA, HA, CA, HA, CA, HA, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.1825, 0.1098, -0.0959, 0.0443, 0.0443, 0.0552, -0.13, 0.1408, -0.1847, 0.1461,
+ -0.0944, 0.128, -0.1847, 0.1461, -0.13, 0.1408, 0.766, -0.8026, -0.8026]
+ CPRO:
+ atom_type: [N, CT, H1, H1, CT, HC, HC, CT, HC, HC, CT, H1, C, O2, O2]
+ atom_charge: [-0.2802, 0.0434, 0.0331, 0.0331, 0.0466, 0.0172, 0.0172, -0.0543, 0.0381, 0.0381, -0.1336, 0.0776,
+ 0.6631, -0.7697, -0.7697]
+ CSER:
+ atom_type: [N, H, CT, H1, CT, H1, H1, OH, HO, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.2722, 0.1304, 0.1123, 0.0813, 0.0813, -0.6514, 0.4474, 0.8113, -0.8132, -0.8132]
+ CTHR:
+ atom_type: [N, H, CT, H1, CT, H1, CT, HC, HC, HC, OH, HO, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.242, 0.1207, 0.3025, 0.0078, -0.1853, 0.0586, 0.0586, 0.0586, -0.6496, 0.4119,
+ 0.781, -0.8044, -0.8044]
+ CTRP:
+ atom_type: [N, H, CT, H1, CT, HC, HC, C*, CW, H4, NA, H, CN, CA, HA, CA, HA, CA, HA, CA, HA, CB, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.2084, 0.1272, -0.0742, 0.0497, 0.0497, -0.0796, -0.1808, 0.2043, -0.3316, 0.3413,
+ 0.1222, -0.2594, 0.1567, -0.102, 0.1401, -0.2287, 0.1507, -0.1837, 0.1491, 0.1078, 0.7658, -0.8011,
+ -0.8011]
+ CTYR:
+ atom_type: [N, H, CT, H1, CT, HC, HC, CA, CA, HA, CA, HA, C, OH, HO, CA, HA, CA, HA, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.2015, 0.1092, -0.0752, 0.049, 0.049, 0.0243, -0.1922, 0.178, -0.2458, 0.1673,
+ 0.3395, -0.5643, 0.4017, -0.2458, 0.1673, -0.1922, 0.178, 0.7817, -0.807, -0.807]
+ CVAL:
+ atom_type: [N, H, CT, H1, CT, HC, CT, HC, HC, HC, CT, HC, HC, HC, C, O2, O2]
+ atom_charge: [-0.3821, 0.2681, -0.3438, 0.1438, 0.194, 0.0308, -0.3064, 0.0836, 0.0836, 0.0836, -0.3064, 0.0836,
+ 0.0836, 0.0836, 0.835, -0.8173, -0.8173]
+ ACE:
+ atom_type: [HC, CT, HC, HC, C, O]
+ atom_charge: [0.112298, -0.3661936, 0.112298, 0.112298, 0.5971897, -0.5678902]
+ NME:
+ atom_type: [N, H, CT, H1, H1, H1]
+ atom_charge: [-0.4156928, 0.2718953, -0.1489974, 0.0975983, 0.0975983, 0.0975983]
+
+parameters:
+ bond_energy:
+ length_unit: nm
+ energy_unit: kj/mol
+ parameter_names:
+ pattern: [bond_length, force_constant]
+ parameters:
+ C-C: [0.1525, 259408.0]
+ C-CA: [0.1409, 392459.2]
+ C-CB: [0.1419, 374049.6]
+ C-CM: [0.1444, 343088.0]
+ C-CT: [0.1522, 265265.6]
+ C-N: [0.1335, 410032.0]
+ C-N*: [0.1383, 354803.2]
+ C-NA: [0.1388, 349782.4]
+ C-NC: [0.1358, 382417.6]
+ C-O: [0.1229, 476976.0]
+ C-O2: [0.125, 548940.8]
+ C-OH: [0.1364, 376560.0]
+ C-OS: [0.1323, 376560.0]
+ C-H4: [0.108, 307105.6]
+ C-H5: [0.108, 307105.6]
+ CA-CA: [0.14, 392459.2]
+ CA-CB: [0.1404, 392459.2]
+ CA-CM: [0.1433, 357313.6]
+ CA-CN: [0.14, 392459.2]
+ CA-CT: [0.151, 265265.6]
+ CA-HA: [0.108, 307105.6]
+ CA-H4: [0.108, 307105.6]
+ CA-N2: [0.134, 402500.8]
+ CA-NA: [0.1381, 357313.6]
+ CA-NC: [0.1339, 404174.4]
+ CA-OH: [0.1364, 376560.0]
+ CB-CB: [0.137, 435136.0]
+ CB-N*: [0.1374, 364844.8]
+ CB-NB: [0.1391, 346435.2]
+ CB-NC: [0.1354, 385764.8]
+ CD-HA: [0.108, 307105.6]
+ CD-CD: [0.14, 392459.2]
+ CD-CM: [0.135, 459403.2]
+ CD-CT: [0.151, 265265.6]
+ CK-H5: [0.108, 307105.6]
+ CK-N*: [0.1371, 368192.0]
+ CK-NB: [0.1304, 442667.2]
+ CM-CM: [0.135, 459403.2]
+ CM-CT: [0.151, 265265.6]
+ CM-HA: [0.108, 307105.6]
+ CM-H4: [0.108, 307105.6]
+ CM-H5: [0.108, 307105.6]
+ CM-N*: [0.1365, 374886.4]
+ CM-OS: [0.124, 401664.0]
+ CQ-H5: [0.108, 307105.6]
+ CQ-NC: [0.1324, 420073.6]
+ CT-CT: [0.1526, 259408.0]
+ CT-HC: [0.109, 284512.0]
+ CT-H1: [0.109, 284512.0]
+ CT-H2: [0.109, 284512.0]
+ CT-H3: [0.109, 284512.0]
+ CT-HP: [0.109, 284512.0]
+ CT-N*: [0.1475, 282001.6]
+ CT-N2: [0.1463, 282001.6]
+ CT-OH: [0.141, 267776.0]
+ CT-OS: [0.141, 267776.0]
+ C*-HC: [0.108, 307105.6]
+ C*-CB: [0.1459, 324678.4]
+ C*-CT: [0.1495, 265265.6]
+ C*-CW: [0.1352, 456892.8]
+ CB-CN: [0.1419, 374049.6]
+ CC-CT: [0.1504, 265265.6]
+ CC-CV: [0.1375, 428441.6]
+ CC-CW: [0.1371, 433462.4]
+ CC-NA: [0.1385, 353129.6]
+ CC-NB: [0.1394, 343088.0]
+ CN-NA: [0.138, 358150.4]
+ CR-H5: [0.108, 307105.6]
+ CR-NA: [0.1343, 399153.6]
+ CR-NB: [0.1335, 408358.4]
+ CT-N: [0.1449, 282001.6]
+ CT-N3: [0.1471, 307105.6]
+ CT-NT: [0.1471, 307105.6]
+ CT-S: [0.181, 189953.6]
+ CT-SH: [0.181, 198321.6]
+ CT-CY: [0.1458, 334720.0]
+ CT-CZ: [0.1459, 334720.0]
+ CV-H4: [0.108, 307105.6]
+ CV-NB: [0.1394, 343088.0]
+ CW-H4: [0.108, 307105.6]
+ CW-NA: [0.1381, 357313.6]
+ CY-NY: [0.115, 502080.0]
+ CZ-CZ: [0.1206, 502080.0]
+ CZ-HZ: [0.1056, 334720.0]
+ O2-P: [0.148, 439320.0]
+ OH-P: [0.161, 192464.0]
+ OS-P: [0.161, 192464.0]
+ H-N2: [0.101, 363171.2]
+ H-N*: [0.101, 363171.2]
+ H-NA: [0.101, 363171.2]
+ H-N: [0.101, 363171.2]
+ H-N3: [0.101, 363171.2]
+ H-NT: [0.101, 363171.2]
+ HO-OH: [0.096, 462750.4]
+ HO-OS: [0.096, 462750.4]
+ HS-SH: [0.1336, 229283.2]
+ S-S: [0.2038, 138908.8]
+ F-CT: [0.138, 307105.6]
+ Cl-CT: [0.1766, 194137.6]
+ Br-CT: [0.1944, 133051.2]
+ I-CT: [0.2166, 123846.4]
+ F-CA: [0.1359, 323004.8]
+ Cl-CA: [0.1727, 161502.4]
+ I-CA: [0.2075, 143092.8]
+ Br-CA: [0.189, 143929.6]
+ LP-O: [0.02, 502080.0]
+ LP-OH: [0.02, 502080.0]
+ LP-OS: [0.02, 502080.0]
+ LP-N3: [0.02, 502080.0]
+ LP-NT: [0.02, 502080.0]
+ LP-NB: [0.02, 502080.0]
+ LP-NC: [0.02, 502080.0]
+ LP-S: [0.07, 502080.0]
+ LP-SH: [0.07, 502080.0]
+ angle_energy:
+ length_unit: nm
+ energy_unit: kj/mol
+ parameter_names:
+ pattern: [bond_angle, force_constant]
+ parameters:
+ C-C-O: [120.0, 669.44]
+ C-C-OH: [120.0, 669.44]
+ CA-C-CA: [120.0, 527.184]
+ CA-C-OH: [120.0, 585.76]
+ CB-C-NA: [111.3, 585.76]
+ CB-C-O: [128.8, 669.44]
+ CM-C-NA: [114.1, 585.76]
+ CM-C-O: [125.3, 669.44]
+ CT-C-O: [120.4, 669.44]
+ CT-C-O2: [117.0, 585.76]
+ CT-C-N: [116.6, 585.76]
+ CT-C-CT: [117.0, 527.184]
+ CT-C-OS: [115.0, 669.44]
+ CT-C-OH: [110.0, 669.44]
+ N*-C-NA: [115.4, 585.76]
+ N*-C-NC: [118.6, 585.76]
+ N*-C-O: [120.9, 669.44]
+ NA-C-O: [120.6, 669.44]
+ NC-C-O: [122.5, 669.44]
+ N-C-O: [122.9, 669.44]
+ O-C-O: [126.0, 669.44]
+ O-C-OH: [120.0, 669.44]
+ O-C-OS: [125.0, 669.44]
+ O2-C-O2: [126.0, 669.44]
+ H4-C-C: [120.0, 418.4]
+ H4-C-CM: [115.0, 418.4]
+ H4-C-CT: [115.0, 418.4]
+ H4-C-O: [120.0, 418.4]
+ H4-C-OH: [120.0, 418.4]
+ H5-C-N: [120.0, 418.4]
+ H5-C-O: [119.0, 418.4]
+ H5-C-OH: [107.0, 418.4]
+ H5-C-OS: [107.0, 418.4]
+ C-CA-CA: [120.0, 527.184]
+ C-CA-HA: [120.0, 418.4]
+ CA-CA-CA: [120.0, 527.184]
+ CA-CA-CB: [120.0, 527.184]
+ CA-CA-CT: [120.0, 585.76]
+ CA-CA-HA: [120.0, 418.4]
+ CA-CA-H4: [120.0, 418.4]
+ CA-CA-OH: [120.0, 585.76]
+ CA-CA-CN: [120.0, 527.184]
+ CB-CA-HA: [120.0, 418.4]
+ CB-CA-H4: [120.0, 418.4]
+ CB-CA-N2: [123.5, 585.76]
+ CB-CA-NC: [117.3, 585.76]
+ CM-CA-N2: [120.1, 585.76]
+ CM-CA-NC: [121.5, 585.76]
+ CN-CA-HA: [120.0, 418.4]
+ NA-CA-NC: [123.3, 585.76]
+ N2-CA-NA: [116.0, 585.76]
+ N2-CA-NC: [119.3, 585.76]
+ N2-CA-N2: [120.0, 585.76]
+ F-CA-CA: [121.0, 585.76]
+ Cl-CA-CA: [118.8, 585.76]
+ Br-CA-CA: [118.8, 585.76]
+ I-CA-CA: [118.8, 585.76]
+ C-CB-CB: [119.2, 527.184]
+ C-CB-NB: [130.0, 585.76]
+ CA-CB-CB: [117.3, 527.184]
+ CA-CB-NB: [132.4, 585.76]
+ CB-CB-N*: [106.2, 585.76]
+ CB-CB-NB: [110.4, 585.76]
+ CB-CB-NC: [127.7, 585.76]
+ C*-CB-CA: [134.9, 527.184]
+ C*-CB-CN: [108.8, 527.184]
+ CA-CB-CN: [116.2, 527.184]
+ N*-CB-NC: [126.2, 585.76]
+ CD-CD-CM: [120.0, 527.184]
+ CD-CD-CT: [120.0, 585.76]
+ CM-CD-CT: [120.0, 585.76]
+ HA-CD-HA: [119.0, 292.88]
+ HA-CD-CD: [120.0, 418.4]
+ HA-CD-CM: [120.0, 418.4]
+ H5-CK-N*: [123.05, 418.4]
+ H5-CK-NB: [123.05, 418.4]
+ N*-CK-NB: [113.9, 585.76]
+ C-CM-CM: [120.7, 527.184]
+ C-CM-CT: [119.7, 585.76]
+ C-CM-HA: [119.7, 418.4]
+ C-CM-H4: [119.7, 418.4]
+ CA-CM-CM: [117.0, 527.184]
+ CA-CM-HA: [123.3, 418.4]
+ CA-CM-H4: [123.3, 418.4]
+ CM-CM-CT: [119.7, 585.76]
+ CM-CM-HA: [119.7, 418.4]
+ CM-CM-H4: [119.7, 418.4]
+ CM-CM-N*: [121.2, 585.76]
+ CM-CM-OS: [125.0, 669.44]
+ H4-CM-N*: [119.1, 418.4]
+ H4-CM-OS: [113.0, 418.4]
+ HA-CM-HA: [120.0, 292.88]
+ HA-CM-CD: [120.0, 418.4]
+ HA-CM-CT: [120.0, 418.4]
+ NC-CQ-NC: [129.1, 585.76]
+ H5-CQ-NC: [115.45, 418.4]
+ H1-CT-H1: [109.5, 292.88]
+ H1-CT-N*: [109.5, 418.4]
+ H1-CT-OH: [109.5, 418.4]
+ H1-CT-OS: [109.5, 418.4]
+ H1-CT-CM: [109.5, 418.4]
+ H1-CT-CY: [110.0, 418.4]
+ H1-CT-CZ: [110.0, 418.4]
+ H1-CT-N: [109.5, 418.4]
+ H1-CT-S: [109.5, 418.4]
+ H1-CT-SH: [109.5, 418.4]
+ H1-CT-N2: [109.5, 418.4]
+ H1-CT-NT: [109.5, 418.4]
+ H2-CT-H2: [109.5, 292.88]
+ H2-CT-N*: [109.5, 418.4]
+ H2-CT-OS: [109.5, 418.4]
+ HP-CT-HP: [109.5, 292.88]
+ HP-CT-N3: [109.5, 418.4]
+ HC-CT-HC: [109.5, 292.88]
+ HC-CT-CM: [109.5, 418.4]
+ HC-CT-CD: [109.5, 418.4]
+ HC-CT-CZ: [110.0, 418.4]
+ C-CT-H1: [109.5, 418.4]
+ C-CT-HP: [109.5, 418.4]
+ C-CT-HC: [109.5, 418.4]
+ C-CT-N: [110.1, 527.184]
+ C-CT-N3: [111.2, 669.44]
+ C-CT-CT: [111.1, 527.184]
+ C-CT-OS: [109.5, 502.08]
+ CA-CT-HC: [109.5, 418.4]
+ CC-CT-CT: [113.1, 527.184]
+ CC-CT-HC: [109.5, 418.4]
+ CM-CT-CT: [111.0, 527.184]
+ CM-CT-OS: [109.5, 418.4]
+ CT-CT-CT: [109.5, 334.72]
+ CT-CT-HC: [109.5, 418.4]
+ CT-CT-H1: [109.5, 418.4]
+ CT-CT-H2: [109.5, 418.4]
+ CT-CT-HP: [109.5, 418.4]
+ CT-CT-N*: [109.5, 418.4]
+ CT-CT-OH: [109.5, 418.4]
+ CT-CT-OS: [109.5, 418.4]
+ CT-CT-S: [114.7, 418.4]
+ CT-CT-SH: [108.6, 418.4]
+ CT-CT-CA: [114.0, 527.184]
+ CT-CT-N2: [111.2, 669.44]
+ CT-CT-N: [109.7, 669.44]
+ CT-CT-N3: [111.2, 669.44]
+ CT-CT-NT: [111.2, 669.44]
+ CT-CT-CY: [110.0, 527.184]
+ CT-CT-CZ: [110.0, 527.184]
+ C*-CT-CT: [115.6, 527.184]
+ C*-CT-HC: [109.5, 418.4]
+ OS-CT-OS: [101.0, 1338.88]
+ OS-CT-CY: [110.0, 418.4]
+ OS-CT-CZ: [110.0, 418.4]
+ OS-CT-N*: [109.5, 418.4]
+ F-CT-F: [109.1, 644.336]
+ F-CT-H1: [109.5, 418.4]
+ F-CT-CT: [109.0, 418.4]
+ F-CT-H2: [109.5, 418.4]
+ Cl-CT-CT: [108.5, 418.4]
+ Cl-CT-H1: [108.5, 418.4]
+ Br-CT-CT: [108.0, 418.4]
+ Br-CT-H1: [106.5, 418.4]
+ I-CT-CT: [106.0, 418.4]
+ CT-CC-NA: [120.0, 585.76]
+ CT-CC-CV: [120.0, 585.76]
+ CT-CC-NB: [120.0, 585.76]
+ CV-CC-NA: [120.0, 585.76]
+ CW-CC-NA: [120.0, 585.76]
+ CW-CC-NB: [120.0, 585.76]
+ CT-CC-CW: [120.0, 585.76]
+ H5-CR-NA: [120.0, 418.4]
+ H5-CR-NB: [120.0, 418.4]
+ NA-CR-NA: [120.0, 585.76]
+ NA-CR-NB: [120.0, 585.76]
+ CC-CV-H4: [120.0, 418.4]
+ CC-CV-NB: [120.0, 585.76]
+ H4-CV-NB: [120.0, 418.4]
+ CC-CW-H4: [120.0, 418.4]
+ CC-CW-NA: [120.0, 585.76]
+ C*-CW-H4: [120.0, 418.4]
+ C*-CW-NA: [108.7, 585.76]
+ H4-CW-NA: [120.0, 418.4]
+ CB-C*-CT: [128.6, 585.76]
+ CB-C*-CW: [106.4, 527.184]
+ CT-C*-CW: [125.0, 585.76]
+ CA-CN-CB: [122.7, 527.184]
+ CA-CN-NA: [132.8, 585.76]
+ CB-CN-NA: [104.4, 585.76]
+ CT-CY-NY: [180.0, 669.44]
+ CT-CZ-CZ: [180.0, 669.44]
+ CZ-CZ-HZ: [180.0, 418.4]
+ C-N-CT: [121.9, 418.4]
+ C-N-H: [120.0, 418.4]
+ CT-N-H: [118.04, 418.4]
+ CT-N-CT: [118.0, 418.4]
+ H-N-H: [120.0, 292.88]
+ C-N*-CM: [121.6, 585.76]
+ C-N*-CT: [117.6, 585.76]
+ C-N*-H: [119.2, 418.4]
+ CB-N*-CK: [105.4, 585.76]
+ CB-N*-CT: [125.8, 585.76]
+ CB-N*-H: [125.8, 418.4]
+ CK-N*-CT: [128.8, 585.76]
+ CK-N*-H: [128.8, 418.4]
+ CM-N*-CT: [121.2, 585.76]
+ CM-N*-H: [121.2, 418.4]
+ CA-N2-H: [120.0, 418.4]
+ CA-N2-CT: [123.2, 418.4]
+ CT-N2-H: [118.4, 418.4]
+ H-N2-H: [120.0, 292.88]
+ CT-N3-H: [109.5, 418.4]
+ CT-N3-CT: [109.5, 418.4]
+ H-N3-H: [109.5, 292.88]
+ CT-NT-H: [109.5, 418.4]
+ CT-NT-CT: [109.5, 418.4]
+ H-NT-H: [109.5, 292.88]
+ C-NA-C: [126.4, 585.76]
+ C-NA-CA: [125.2, 585.76]
+ C-NA-H: [116.8, 418.4]
+ CA-NA-H: [118.0, 418.4]
+ CC-NA-CR: [120.0, 585.76]
+ CC-NA-H: [120.0, 418.4]
+ CR-NA-CW: [120.0, 585.76]
+ CR-NA-H: [120.0, 418.4]
+ CW-NA-H: [120.0, 418.4]
+ CN-NA-CW: [111.6, 585.76]
+ CN-NA-H: [123.1, 418.4]
+ CB-NB-CK: [103.8, 585.76]
+ CC-NB-CR: [117.0, 585.76]
+ CR-NB-CV: [117.0, 585.76]
+ C-NC-CA: [120.5, 585.76]
+ CA-NC-CB: [112.2, 585.76]
+ CA-NC-CQ: [118.6, 585.76]
+ CB-NC-CQ: [111.0, 585.76]
+ C-OH-HO: [113.0, 418.4]
+ CA-OH-HO: [113.0, 418.4]
+ CT-OH-HO: [108.5, 460.24]
+ HO-OH-P: [108.5, 376.56]
+ C-OS-CT: [117.0, 502.08]
+ CM-OS-CT: [117.0, 502.08]
+ CT-OS-CT: [109.5, 502.08]
+ CT-OS-P: [120.5, 836.8]
+ P-OS-P: [120.5, 836.8]
+ O2-P-OH: [108.23, 376.56]
+ O2-P-O2: [119.9, 1171.52]
+ O2-P-OS: [108.23, 836.8]
+ OH-P-OS: [102.6, 376.56]
+ OS-P-OS: [102.6, 376.56]
+ CT-S-CT: [98.9, 518.816]
+ CT-S-S: [103.7, 569.024]
+ CT-SH-HS: [96.0, 359.824]
+ HS-SH-HS: [92.07, 292.88]
+ CB-NB-LP: [126.0, 1255.2]
+ CC-NB-LP: [126.0, 1255.2]
+ CK-NB-LP: [126.0, 1255.2]
+ CR-NB-LP: [126.0, 1255.2]
+ CV-NB-LP: [126.0, 1255.2]
+ C-NC-LP: [120.0, 1255.2]
+ CA-NC-LP: [120.0, 1255.2]
+ CB-NC-LP: [120.0, 1255.2]
+ CQ-NC-LP: [120.0, 1255.2]
+ CT-N3-LP: [109.5, 1255.2]
+ H-N3-LP: [109.5, 1255.2]
+ CT-NT-LP: [109.5, 1255.2]
+ H-NT-LP: [109.5, 1255.2]
+ C-O-LP: [120.0, 1255.2]
+ LP-O-LP: [120.0, 1255.2]
+ C-OH-LP: [120.0, 1255.2]
+ CT-OH-LP: [109.5, 1255.2]
+ HO-OH-LP: [109.5, 1255.2]
+ LP-OH-LP: [109.5, 1255.2]
+ C-OS-LP: [109.5, 1255.2]
+ CM-OS-LP: [109.5, 1255.2]
+ CT-OS-LP: [109.5, 1255.2]
+ LP-OS-LP: [109.5, 1255.2]
+ CT-S-LP: [90.0, 1255.2]
+ CT-SH-LP: [90.0, 1255.2]
+ P-OS-LP: [109.5, 1255.2]
+ LP-S-LP: [180.0, 1255.2]
+ LP-SH-LP: [180.0, 1255.2]
+ HS-SH-LP: [90.0, 1255.2]
+ dihedral_energy:
+ length_unit: nm
+ energy_unit: kj/mol
+ parameter_names:
+ pattern:
+ - [phase, force_constant, periodicity]
+ parameters:
+ ?-C-C-?:
+ - [180.0, 30.334, 2.0]
+ ?-C-CA-?:
+ - [180.0, 30.334, 2.0]
+ ?-C-CB-?:
+ - [180.0, 25.104, 2.0]
+ ?-C-CM-?:
+ - [180.0, 18.2, 2.0]
+ ?-C-CT-?:
+ - [0.0, 0.0, 2.0]
+ ?-C-N-?:
+ - [180.0, 20.92, 2.0]
+ ?-C-N*-?:
+ - [180.0, 12.134, 2.0]
+ ?-C-NA-?:
+ - [180.0, 11.297, 2.0]
+ ?-C-NC-?:
+ - [180.0, 33.472, 2.0]
+ ?-C-O-?:
+ - [180.0, 23.43, 2.0]
+ ?-C-OH-?:
+ - [180.0, 19.246, 2.0]
+ ?-C-OS-?:
+ - [180.0, 22.594, 2.0]
+ ?-CA-CA-?:
+ - [180.0, 30.334, 2.0]
+ ?-CA-CB-?:
+ - [180.0, 29.288, 2.0]
+ ?-CA-CM-?:
+ - [180.0, 21.338, 2.0]
+ ?-CA-CN-?:
+ - [180.0, 30.334, 2.0]
+ ?-CA-CT-?:
+ - [0.0, 0.0, 2.0]
+ ?-CA-N2-?:
+ - [180.0, 20.083, 2.0]
+ ?-CA-NA-?:
+ - [180.0, 12.552, 2.0]
+ ?-CA-NC-?:
+ - [180.0, 40.166, 2.0]
+ ?-CA-OH-?:
+ - [180.0, 7.531, 2.0]
+ ?-CB-CB-?:
+ - [180.0, 45.606, 2.0]
+ ?-CB-CN-?:
+ - [180.0, 25.104, 2.0]
+ ?-CB-N*-?:
+ - [180.0, 13.807, 2.0]
+ ?-CB-NB-?:
+ - [180.0, 21.338, 2.0]
+ ?-CB-NC-?:
+ - [180.0, 34.727, 2.0]
+ ?-CC-CT-?:
+ - [0.0, 0.0, 2.0]
+ ?-CC-CV-?:
+ - [180.0, 43.095, 2.0]
+ ?-CC-CW-?:
+ - [180.0, 44.978, 2.0]
+ ?-CC-NA-?:
+ - [180.0, 11.715, 2.0]
+ ?-CC-NB-?:
+ - [180.0, 20.083, 2.0]
+ ?-CD-CD-?:
+ - [180.0, 8.368, 2.0]
+ ?-CD-CT-?:
+ - [0.0, 0.0, 2.0]
+ ?-CD-CM-?:
+ - [180.0, 55.647, 2.0]
+ ?-CK-N*-?:
+ - [180.0, 14.226, 2.0]
+ ?-CK-NB-?:
+ - [180.0, 83.68, 2.0]
+ ?-CM-CM-?:
+ - [180.0, 55.647, 2.0]
+ ?-CM-CT-?:
+ - [0.0, 0.0, 3.0]
+ ?-CM-N*-?:
+ - [180.0, 15.481, 2.0]
+ ?-CM-OS-?:
+ - [180.0, 8.786, 2.0]
+ ?-CN-NA-?:
+ - [180.0, 12.761, 2.0]
+ ?-CQ-NC-?:
+ - [180.0, 56.902, 2.0]
+ ?-CT-CT-?:
+ - [0.0, 1.302, 3.0]
+ ?-CT-CY-?:
+ - [0.0, 0.0, 1.0]
+ ?-CT-CZ-?:
+ - [0.0, 0.0, 1.0]
+ ?-CT-N-?:
+ - [0.0, 0.0, 2.0]
+ ?-CT-N*-?:
+ - [0.0, 0.0, 2.0]
+ ?-CT-N2-?:
+ - [0.0, 0.0, 3.0]
+ ?-CT-NT-?:
+ - [0.0, 2.51, 3.0]
+ ?-CT-N3-?:
+ - [0.0, 1.302, 3.0]
+ ?-CT-OH-?:
+ - [0.0, 1.395, 3.0]
+ ?-CT-OS-?:
+ - [0.0, 3.208, 3.0]
+ ?-CT-S-?:
+ - [0.0, 2.789, 3.0]
+ ?-CT-SH-?:
+ - [0.0, 2.092, 3.0]
+ ?-C*-CB-?:
+ - [180.0, 14.016, 2.0]
+ ?-C*-CT-?:
+ - [0.0, 0.0, 2.0]
+ ?-C*-CW-?:
+ - [180.0, 54.601, 2.0]
+ ?-CR-NA-?:
+ - [180.0, 19.456, 2.0]
+ ?-CR-NB-?:
+ - [180.0, 41.84, 2.0]
+ ?-CV-NB-?:
+ - [180.0, 20.083, 2.0]
+ ?-CW-NA-?:
+ - [180.0, 12.552, 2.0]
+ ?-OH-P-?:
+ - [0.0, 2.092, 3.0]
+ ?-OS-P-?:
+ - [0.0, 2.092, 3.0]
+ N-CT-C-N:
+ - [0.0, 0.0, -4.0]
+ - [180.0, 4.602, -3.0]
+ - [180.0, 13.221, -2.0]
+ - [180.0, 3.766, 1.0]
+ C-N-CT-C:
+ - [0.0, 0.0, -4.0]
+ - [0.0, 3.515, -3.0]
+ - [0.0, 2.259, -2.0]
+ - [0.0, 0.0, 1.0]
+ CT-CT-N-C:
+ - [0.0, 0.0, -4.0]
+ - [0.0, 3.347, -3.0]
+ - [0.0, 16.736, -2.0]
+ - [0.0, 16.736, 1.0]
+ CT-CT-C-N:
+ - [0.0, 0.0, -4.0]
+ - [0.0, 3.347, -3.0]
+ - [0.0, 1.674, -2.0]
+ - [0.0, 1.674, 1.0]
+ H-N-C-O:
+ - [180.0, 20.92, -2.0]
+ - [0.0, 16.736, 1.0]
+ CT-S-S-CT:
+ - [0.0, 29.288, -2.0]
+ - [0.0, 5.021, 3.0]
+ OH-P-OS-CT:
+ - [0.0, 2.092, -3.0]
+ - [0.0, 10.042, 2.0]
+ OS-P-OS-CT:
+ - [0.0, 2.092, -3.0]
+ - [0.0, 10.042, 2.0]
+ H1-CT-C-O:
+ - [0.0, 6.694, -1.0]
+ - [0.0, 0.0, -2.0]
+ - [180.0, 0.669, 3.0]
+ HC-CT-C-O:
+ - [0.0, 6.694, -1.0]
+ - [0.0, 0.0, -2.0]
+ - [180.0, 0.669, 3.0]
+ HC-CT-CT-HC:
+ - [0.0, 1.255, 3.0]
+ HC-CT-CT-CT:
+ - [0.0, 1.339, 3.0]
+ HC-CT-CM-CM:
+ - [180.0, 3.18, -3.0]
+ - [0.0, 9.623, 1.0]
+ HO-OH-CT-CT:
+ - [0.0, 1.339, -3.0]
+ - [0.0, 2.092, 1.0]
+ HO-OH-C-O:
+ - [180.0, 19.246, -2.0]
+ - [0.0, 15.899, 1.0]
+ CM-CM-C-O:
+ - [180.0, 18.2, -2.0]
+ - [0.0, 2.51, 3.0]
+ CT-CM-CM-CT:
+ - [180.0, 55.647, -2.0]
+ - [180.0, 15.899, 1.0]
+ CT-CT-CT-CT:
+ - [0.0, 1.506, -3.0]
+ - [180.0, 2.092, -2.0]
+ - [180.0, 1.674, 1.0]
+ CT-CT-NT-CT:
+ - [0.0, 2.51, -3.0]
+ - [180.0, 4.017, 2.0]
+ CT-CT-OS-CT:
+ - [0.0, 3.205, -3.0]
+ - [180.0, 0.837, 2.0]
+ CT-CT-OS-C:
+ - [0.0, 3.205, -3.0]
+ - [180.0, 6.694, 1.0]
+ CT-OS-CT-OS:
+ - [0.0, 0.837, -3.0]
+ - [180.0, 7.113, -2.0]
+ - [180.0, 11.297, 1.0]
+ CT-OS-CT-N*:
+ - [0.0, 3.205, -3.0]
+ - [0.0, 5.439, 2.0]
+ CT-CZ-CZ-HZ:
+ - [0.0, 0.0, 1.0]
+ O-C-OS-CT:
+ - [180.0, 22.594, -2.0]
+ - [180.0, 11.715, 1.0]
+ OS-CT-N*-CK:
+ - [0.0, 0.0, -2.0]
+ - [0.0, 20.92, 1.0]
+ OS-CT-N*-CM:
+ - [0.0, 0.0, -2.0]
+ - [0.0, 20.92, 1.0]
+ OS-CT-CT-OS:
+ - [0.0, 1.205, -3.0]
+ - [0.0, 9.832, 2.0]
+ OS-CT-CT-OH:
+ - [0.0, 1.205, -3.0]
+ - [0.0, 9.832, 2.0]
+ OH-CT-CT-OH:
+ - [0.0, 1.205, -3.0]
+ - [0.0, 9.832, 2.0]
+ F-CT-CT-F:
+ - [0.0, 0.0, -3.0]
+ - [180.0, 10.042, 1.0]
+ Cl-CT-CT-Cl:
+ - [0.0, 0.0, -3.0]
+ - [180.0, 3.766, 1.0]
+ Br-CT-CT-Br:
+ - [0.0, 0.0, -3.0]
+ - [180.0, 0.0, 1.0]
+ H1-CT-CT-OS:
+ - [0.0, 0.0, -3.0]
+ - [0.0, 2.092, 1.0]
+ H1-CT-CT-OH:
+ - [0.0, 0.0, -3.0]
+ - [0.0, 2.092, 1.0]
+ H1-CT-CT-F:
+ - [0.0, 0.0, -3.0]
+ - [0.0, 1.59, 1.0]
+ H1-CT-CT-Cl:
+ - [0.0, 0.0, -3.0]
+ - [0.0, 2.092, 1.0]
+ H1-CT-CT-Br:
+ - [0.0, 0.0, -3.0]
+ - [0.0, 4.602, 1.0]
+ HC-CT-CT-OS:
+ - [0.0, 0.0, -3.0]
+ - [0.0, 2.092, 1.0]
+ HC-CT-CT-OH:
+ - [0.0, 0.0, -3.0]
+ - [0.0, 2.092, 1.0]
+ HC-CT-CT-F:
+ - [0.0, 0.0, -3.0]
+ - [0.0, 1.59, 1.0]
+ HC-CT-CT-Cl:
+ - [0.0, 0.0, -3.0]
+ - [0.0, 2.092, 1.0]
+ HC-CT-CT-Br:
+ - [0.0, 0.0, -3.0]
+ - [0.0, 4.602, 1.0]
+ H1-CT-NT-LP:
+ - [0.0, 0.0, 3.0]
+ CT-CT-NT-LP:
+ - [0.0, 0.0, 3.0]
+ CT-C-N-LP:
+ - [180.0, 0.0, 2.0]
+ O-C-N-LP:
+ - [180.0, 0.0, 2.0]
+ H1-CT-OH-LP:
+ - [0.0, 0.0, 3.0]
+ CT-CT-OH-LP:
+ - [0.0, 0.0, 3.0]
+ H1-CT-OS-LP:
+ - [0.0, 0.0, 3.0]
+ H2-CT-OS-LP:
+ - [0.0, 0.0, 3.0]
+ CT-CT-OS-LP:
+ - [0.0, 0.0, 3.0]
+ CM-CM-OS-LP:
+ - [180.0, 0.0, 2.0]
+ HA-CM-OS-LP:
+ - [180.0, 0.0, 2.0]
+ H4-CM-OS-LP:
+ - [180.0, 0.0, 2.0]
+ improper_energy:
+ length_unit: nm
+ energy_unit: kj/mol
+ parameter_names:
+ pattern:
+ - [phase, force_constant, periodicity]
+ parameters:
+ ?-?-C-O:
+ - [180.0, 87.864, 2]
+ ?-O2-C-O2:
+ - [180.0, 87.864, 2]
+ ?-?-N-H:
+ - [180.0, 8.368, 2]
+ ?-?-N2-H:
+ - [180.0, 8.368, 2]
+ ?-?-NA-H:
+ - [180.0, 8.368, 2]
+ ?-N2-CA-N2:
+ - [180.0, 87.864, 2]
+ ?-CT-N-CT:
+ - [180.0, 8.368, 2]
+ ?-?-CA-HA:
+ - [180.0, 9.205, 2]
+ ?-?-CW-H4:
+ - [180.0, 9.205, 2]
+ ?-?-CR-H5:
+ - [180.0, 9.205, 2]
+ ?-?-CV-H4:
+ - [180.0, 9.205, 2]
+ ?-?-CQ-H5:
+ - [180.0, 9.205, 2]
+ ?-?-CK-H5:
+ - [180.0, 9.205, 2]
+ ?-?-CM-H4:
+ - [180.0, 9.205, 2]
+ ?-?-CM-HA:
+ - [180.0, 9.205, 2]
+ ?-?-CA-H4:
+ - [180.0, 9.205, 2]
+ ?-?-CA-H5:
+ - [180.0, 9.205, 2]
+ CB-CK-N*-CT:
+ - [180.0, 8.368, 2]
+ C-CM-N*-CT:
+ - [180.0, 8.368, 2]
+ CT-O-C-OH:
+ - [180.0, 87.864, 2]
+ CT-CV-CC-NA:
+ - [180.0, 9.205, 2]
+ CT-CW-CC-NB:
+ - [180.0, 9.205, 2]
+ CT-CW-CC-NA:
+ - [180.0, 9.205, 2]
+ CB-CT-C*-CW:
+ - [180.0, 9.205, 2]
+ CA-CA-CA-CT:
+ - [180.0, 9.205, 2]
+ C-CM-CM-CT:
+ - [180.0, 9.205, 2]
+ CM-N2-CA-NC:
+ - [180.0, 9.205, 2]
+ CB-N2-CA-NC:
+ - [180.0, 9.205, 2]
+ N2-NA-CA-NC:
+ - [180.0, 9.205, 2]
+ CA-CA-C-OH:
+ - [180.0, 9.205, 2]
+ CA-CA-CA-OH:
+ - [180.0, 9.205, 2]
+ H5-O-C-OH:
+ - [180.0, 9.205, 2]
+ H5-O-C-OS:
+ - [180.0, 9.205, 2]
+ CM-CT-CM-HA:
+ - [180.0, 9.205, 2]
+ Br-CA-CA-CA:
+ - [180.0, 9.205, 2]
+ CM-H4-C-O:
+ - [180.0, 9.205, 2]
+ C-CT-N-H:
+ - [180.0, 9.205, 2]
+ C-CT-N-O:
+ - [180.0, 9.205, 2]
+ coulomb_energy:
+ length_unit: nm
+ energy_unit: kj/mol
+ vdw_energy:
+ length_unit: nm
+ energy_unit: kj/mol
+ parameter_names:
+ pattern: [sigma, epsilon]
+ parameters:
+ H: [0.1069078, 0.0656888]
+ HO: [0.0, 0.0]
+ HS: [0.1069078, 0.0656888]
+ HC: [0.2649533, 0.0656888]
+ H1: [0.2471353, 0.0656888]
+ H2: [0.2293173, 0.0656888]
+ H3: [0.2114994, 0.0656888]
+ HP: [0.1959977, 0.0656888]
+ HA: [0.2599642, 0.06276]
+ H4: [0.2510553, 0.06276]
+ H5: [0.2421463, 0.06276]
+ HZ: [0.2599642, 0.06276]
+ O: [0.2959922, 0.87864]
+ O2: [0.2959922, 0.87864]
+ OH: [0.3066473, 0.8803136]
+ OS: [0.3000012, 0.71128]
+ C*: [0.339967, 0.359824]
+ CA: [0.339967, 0.359824]
+ CB: [0.339967, 0.359824]
+ CC: [0.339967, 0.359824]
+ CD: [0.339967, 0.359824]
+ CK: [0.339967, 0.359824]
+ CM: [0.339967, 0.359824]
+ CN: [0.339967, 0.359824]
+ CQ: [0.339967, 0.359824]
+ CR: [0.339967, 0.359824]
+ CV: [0.339967, 0.359824]
+ CW: [0.339967, 0.359824]
+ CY: [0.339967, 0.359824]
+ CZ: [0.339967, 0.359824]
+ CT: [0.339967, 0.4577296]
+ C: [0.339967, 0.359824]
+ N: [0.3249999, 0.71128]
+ NA: [0.3249999, 0.71128]
+ N2: [0.3249999, 0.71128]
+ N*: [0.3249999, 0.71128]
+ NC: [0.3249999, 0.71128]
+ NB: [0.3249999, 0.71128]
+ NT: [0.3249999, 0.71128]
+ NY: [0.3249999, 0.71128]
+ N3: [0.3249999, 0.71128]
+ S: [0.3563595, 1.046]
+ SH: [0.3563595, 1.046]
+ P: [0.3741775, 0.8368]
+ IM: [0.440104, 0.4184]
+ Li: [0.2025904, 0.0765672]
+ IP: [0.3328398, 0.0115897]
+ Na: [0.3328398, 0.0115897]
+ K: [0.4736018, 0.0013724]
+ Rb: [0.5266993, 0.0007113]
+ Cs: [0.6049202, 0.0003372]
+ MG: [0.1412253, 3.7434248]
+ C0: [0.3052397, 1.9237572]
+ Zn: [0.1959977, 0.0523]
+ F: [0.3118146, 0.255224]
+ Cl: [0.3470941, 1.10876]
+ Br: [0.395559, 1.33888]
+ I: [0.4187224, 1.6736]
+ IB: [0.8908987, 0.4184]
+ LP: [0.0, 0.0]
+ nb_pair_energy:
+ length_unit: nm
+ energy_unit: kj/mol
+ parameter_names:
+ pattern: [r_scale, r6_scale, r12_scale]
+ parameters:
+ ?: [0.8333333, 0.5, 0.5]
diff --git a/MindSPONGE/mindsponge/python/data/hyperparam.py b/MindSPONGE/mindsponge/python/data/hyperparam.py
index d60140da4fadbf15cc837b1872b9343145a7ef9c..eab263fb220aad0870265e775cbe991559f6b068 100644
--- a/MindSPONGE/mindsponge/python/data/hyperparam.py
+++ b/MindSPONGE/mindsponge/python/data/hyperparam.py
@@ -31,6 +31,20 @@ from mindspore.train import load_checkpoint
from ..function.functions import get_integer
+__all__ = [
+ 'str_to_tensor',
+ 'tensor_to_str',
+ 'get_class_parameters',
+ 'get_hyper_parameter',
+ 'get_hyper_string',
+ 'set_class_parameters',
+ 'set_hyper_parameter',
+ 'set_class_into_hyper_param',
+ 'load_hyperparam',
+ 'load_hyper_param_into_class',
+]
+
+
def str_to_tensor(string: str) -> Tensor:
"""
encode string to Tensor[int]
diff --git a/MindSPONGE/mindsponge/python/data/parameters.py b/MindSPONGE/mindsponge/python/data/parameters.py
index f2a4bc981f667e1bce71934f5d5ad2d081655f06..f52512e90df71b07f88619f7c1ad8bff807ce8cf 100644
--- a/MindSPONGE/mindsponge/python/data/parameters.py
+++ b/MindSPONGE/mindsponge/python/data/parameters.py
@@ -61,7 +61,7 @@ class ForceFieldParameters:
Getting parameters for given bonds and atom types.
Args:
- atom_types(str): The atom types defined in forcefields.
+ atom_type(str): The atom types defined in forcefields.
parameters(dict): A dictionary stores all force field constants.
atom_names(str): Unique atom names in an amino acid. Default: None
atom_charges(ndarray): The charge of the atoms. Default: None
@@ -70,10 +70,10 @@ class ForceFieldParameters:
``Ascend`` ``GPU``
"""
- def __init__(self, atom_types, parameters, atom_names=None, atom_charges=None):
- self.atom_types = atom_types[0]
+ def __init__(self, atom_type, parameters, atom_names=None, atom_charges=None):
+ self.atom_type = atom_type[0]
self.atom_names = atom_names[0]
- atom_nums = atom_types.shape[-1]
+ atom_nums = atom_type.shape[-1]
assert atom_nums > 0
self.atom_charges = atom_charges
self.atom_nums = atom_nums
@@ -201,7 +201,7 @@ class ForceFieldParameters:
return params
- def get_dihedral_params(self, dihedrals_in, atom_types):
+ def get_dihedral_params(self, dihedrals_in, atom_type):
"""
Get the force field dihedral parameters.
@@ -212,7 +212,7 @@ class ForceFieldParameters:
Returns:
dict, params.
"""
- dihedral_atoms = np.take(atom_types, dihedrals_in, -1)
+ dihedral_atoms = np.take(atom_type, dihedrals_in, -1)
k_index = self._dihedrals['parameter_names']["pattern"][0].index('force_constant')
phi_index = self._dihedrals['parameter_names']["pattern"][0].index('phase')
@@ -257,19 +257,19 @@ class ForceFieldParameters:
return params
- def get_improper_params(self, improper_in, atom_types, third_id):
+ def get_improper_params(self, improper_in, atom_type, third_id):
"""
Pre-processing of getting improper dihedrals.
Args:
improper_in (ndarray): Array of input improper dihedrals.
- atom_types (ndarray): Array of the types of atoms.
+ atom_type (ndarray): Array of the types of atoms.
third_id (ndarray): Array of the third IDs.
Returns:
dict, params.
"""
- improper_atoms = np.take(atom_types, improper_in, -1)
+ improper_atoms = np.take(atom_type, improper_in, -1)
k_index = self._improper['parameter_names']["pattern"][0].index('force_constant')
phi_index = self._improper['parameter_names']["pattern"][0].index('phase')
@@ -640,7 +640,7 @@ class ForceFieldParameters:
- bonds (np.ndarray), bonds with H.
- bonds (np.ndarray), non H bonds.
"""
- hatoms = np.where(np.isin(self.atom_types, self.htypes))[0]
+ hatoms = np.where(np.isin(self.atom_type, self.htypes))[0]
bonds_with_h = np.where(np.isin(bonds, hatoms).sum(axis=-1))[0]
non_hbonds = np.where(np.isin(bonds, hatoms).sum(axis=-1) == 0)[0]
return bonds[bonds_with_h], bonds[non_hbonds]
@@ -734,9 +734,9 @@ class ForceFieldParameters:
def __call__(self, bonds):
# pylint: disable=unused-argument
bonds = bonds[0]
- atoms_types = self.atom_types.copy()
+ atoms_types = self.atom_type.copy()
vdw_params = self.get_vdw_params(atoms_types)
- atom_types = np.append(atoms_types, self._wildcard)
+ atom_type = np.append(atoms_types, self._wildcard)
bond_params = None
angle_params = None
@@ -757,14 +757,14 @@ class ForceFieldParameters:
dihedrals = self.get_dihedrals(angles, dihedral_middle_id)
dihedral_params = None
if dihedrals is not None:
- dihedral_params = self.get_dihedral_params(dihedrals, atom_types)
+ dihedral_params = self.get_dihedral_params(dihedrals, atom_type)
core_id = np.where(np.bincount(bonds.flatten()) > 2)[0]
improper = None
improper_params = None
if self._improper is not None:
checked_core_id = self.check_improper(bonds, core_id)
improper, third_id = self.get_improper(bonds, checked_core_id)
- improper_params = self.get_improper_params(improper, atom_types, third_id)
+ improper_params = self.get_improper_params(improper, atom_type, third_id)
if dihedrals is not None:
self.pair_index = self.get_pair_index(dihedrals, angles, bonds)
pair_params = self.get_pair_params(self.pair_index, vdw_params['epsilon'],
diff --git a/MindSPONGE/mindsponge/python/function/__init__.py b/MindSPONGE/mindsponge/python/function/__init__.py
index 09dbb26a99e64d14e487b9406974c54d8f2e13f2..e01c90fa723bbcef3390a80e50a24583b0e35cc0 100644
--- a/MindSPONGE/mindsponge/python/function/__init__.py
+++ b/MindSPONGE/mindsponge/python/function/__init__.py
@@ -22,9 +22,6 @@
# ============================================================================
"""Functions and Operations"""
-from mindspore import context
-from mindspore.ops import Gather
-
from .functions import *
from .units import *
from .operations import GetVector, GetDistance, VelocityGenerator, \
diff --git a/MindSPONGE/mindsponge/python/function/functions.py b/MindSPONGE/mindsponge/python/function/functions.py
index d99dcc4846d786082f1df2c0985396b80b3fe268..d0fd4efd546b40fc9cd9c93c5279f3046a282ec1 100644
--- a/MindSPONGE/mindsponge/python/function/functions.py
+++ b/MindSPONGE/mindsponge/python/function/functions.py
@@ -25,6 +25,7 @@ Common functions
"""
from typing import Union, List, Tuple
+from datetime import time, timedelta, date
import numpy as np
from numpy import ndarray
import mindspore as ms
@@ -33,6 +34,8 @@ from mindspore import ops
from mindspore import jit
from mindspore import Tensor, Parameter
from mindspore.ops import functional as F
+from mindspore.common.initializer import Initializer, _INITIALIZER_ALIAS
+
__all__ = [
'PI',
@@ -45,6 +48,10 @@ __all__ = [
'reduce_prod',
'concat_last_dim',
'concat_penulti',
+ 'stack_last_dim',
+ 'stack_penulti',
+ 'squeeze_last_dim',
+ 'squeeze_penulti',
'identity',
'periodic_variable',
'periodic_difference',
@@ -80,6 +87,8 @@ __all__ = [
'all_none',
'any_not_none',
'all_not_none',
+ 'get_arguments',
+ 'get_initializer'
]
PI = 3.141592653589793238462643383279502884197169399375105820974944592307
@@ -94,6 +103,10 @@ reduce_all = ops.ReduceAll()
reduce_prod = ops.ReduceProd()
concat_last_dim = ops.Concat(-1)
concat_penulti = ops.Concat(-2)
+stack_last_dim = ops.Stack(-1)
+stack_penulti = ops.Stack(-2)
+squeeze_last_dim = ops.Squeeze(-1)
+squeeze_penulti = ops.Squeeze(-2)
identity = ops.Identity()
@@ -327,7 +340,7 @@ def vector_in_pbc(vector: Tensor, pbc_box: Tensor, offset: float = -0.5) -> Tens
Args:
vector (Tensor): Tensor of shape `(B, ..., D)`. Data type is float.
- Vector :math:`\vec{v}
+ Vector :math:`\vec{v}`
pbc_box (Tensor): Tensor of shape `(B, D)`. Data type is float.
Size of PBC box :math:`\vec{L}`
offset (float): Offset ratio :math:`c` of the vector relative to box size :math:`\vec{L}`.
@@ -1204,7 +1217,7 @@ def any_not_none(iterable: Union[list, tuple]) -> bool:
def all_not_none(iterable: Union[list, tuple]) -> bool:
- r"""Return True if ALL values `x` in the `iterable` is Not None..
+ r"""Return True if ALL values `x` in the `iterable` is Not None.
Args:
iterable (Union[list, tuple]): Iterable variable
@@ -1217,3 +1230,112 @@ def all_not_none(iterable: Union[list, tuple]) -> bool:
"""
return all([i is not None for i in iterable])
+
+
+def get_arguments(locals_: dict, kwargs: dict = None) -> dict:
+ r"""get arguments of a class
+
+ Args:
+ locals_ (dict): Dictionary of the arguments from `locals()`.
+ kwargs (dict): Dictionary of keyword arguments (kwargs) of the class.
+
+ Returns:
+ args (dict): Dictionary of arguments
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ """
+
+ if '__class__' in locals_.keys():
+ locals_.pop('__class__')
+
+ arguments = {}
+ if 'self' in locals_.keys():
+ cls = locals_.pop('self')
+ arguments['cls_name'] = cls.__class__.__name__
+
+ def _set_arguments(args_: dict):
+ def _convert(value):
+ if value is None or isinstance(value, (int, float, bool, str,
+ time, timedelta, date)):
+ return value
+ if isinstance(value, ndarray):
+ return value.tolist()
+ if isinstance(value, (Tensor, Parameter)):
+ return value.asnumpy().tolist()
+ if isinstance(value, (list, tuple)):
+ return [_convert(v) for v in value]
+ if isinstance(value, dict):
+ if 'cls_name' in value.keys():
+ return value
+ dict_ = value.copy()
+ for k, v in value.items():
+ dict_[k] = _convert(v)
+ return dict_
+
+ cls_name = value.__class__.__name__
+ if hasattr(value, '_kwargs'):
+ value = value.__dict__['_kwargs']
+ elif hasattr(value, 'init_args'):
+ value = value.__dict__['init_args']
+ else:
+ value = value.__class__.__name__
+
+ if isinstance(value, dict) and 'cls_name' not in value.keys():
+ dict_ = {'cls_name': cls_name}
+ dict_.update(_set_arguments(value))
+ value = dict_
+
+ return value
+
+ for k, v in args_.items():
+ args_[k] = _convert(v)
+ return args_
+
+ kwargs_ = {}
+ if 'kwargs' in locals_.keys():
+ kwargs_: dict = locals_.pop('kwargs')
+
+ if kwargs is None:
+ kwargs = kwargs_
+
+ if 'cls_name' in kwargs.keys():
+ kwargs.pop('cls_name')
+
+ arguments.update(_set_arguments(locals_))
+ arguments.update(_set_arguments(kwargs))
+
+ return arguments
+
+
+def get_initializer(cls_name: Union[Initializer, str, dict, Tensor], **kwargs) -> Initializer:
+ r"""get initializer by name
+
+ Args:
+ cls_name (Union[Initializer, str, dict, Tensor]): Class name of Initializer.
+ kwargs (dict): Dictionary of keyword arguments (kwargs) of the class.
+
+ Returns:
+ Initializer
+
+ Supported Platforms:
+ ``Ascend`` ``GPU`` ``CPU``
+
+ """
+ if isinstance(cls_name, Initializer):
+ return cls_name
+
+ if isinstance(cls_name, (Tensor, Parameter, ndarray)):
+ return get_tensor(cls_name, ms.float32)
+
+ if isinstance(cls_name, dict):
+ return get_initializer(**cls_name)
+
+ if isinstance(cls_name, str):
+ init = _INITIALIZER_ALIAS.get(cls_name.lower())
+ if init is None:
+ raise ValueError(f"For 'initializer', the class corresponding to '{cls_name}' was not found.")
+ return init(**kwargs)
+
+ raise TypeError(f'The cls_name must be Initializer, str, dict or Tensor but got: {type(cls_name)}')
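The `get_arguments` helper added above is what the later hunks in this patch (units.py, md.py, updater.py) use to record `self._kwargs`. A minimal usage sketch, assuming the helper is exposed as `mindsponge.function.get_arguments` once the package is installed (`MyCell` is a hypothetical class, used only for illustration):

    from mindsponge.function import get_arguments  # import path assumed

    class MyCell:
        """Hypothetical class that records its own constructor arguments."""

        def __init__(self, length_unit: str = 'nm', cutoff: float = 1.0, **kwargs):
            # yields a plain, serializable dict such as
            # {'cls_name': 'MyCell', 'length_unit': 'nm', 'cutoff': 0.8}
            self._kwargs = get_arguments(locals(), kwargs)

    print(MyCell(cutoff=0.8)._kwargs)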
diff --git a/MindSPONGE/mindsponge/python/function/operations.py b/MindSPONGE/mindsponge/python/function/operations.py
index bed9b39298fca5604fef57d9cf348b393a6f62f9..7612e7073cdee6666b6c5f4665c1a5d42d9b9df5 100644
--- a/MindSPONGE/mindsponge/python/function/operations.py
+++ b/MindSPONGE/mindsponge/python/function/operations.py
@@ -24,6 +24,7 @@
Common operations
"""
+from inspect import signature
import numpy as np
import mindspore as ms
from mindspore import numpy as msnp
@@ -159,7 +160,13 @@ class GetDistance(GetVector):
super().__init__(use_pbc=use_pbc)
- self.norm = nn.Norm(get_integer(axis), keepdims)
+ self.axis = get_integer(axis)
+ self.keepdims = keepdims
+
+ self.norm = None
+ # MindSpore < 2.0.0-rc1
+ if 'ord' not in signature(ops.norm).parameters.keys():
+ self.norm = nn.Norm(self.axis, self.keepdims)
def construct(self, initial: Tensor, terminal: Tensor, pbc_box: Tensor = None):
r"""Compute the distance from initial point to terminal point.
@@ -181,6 +188,10 @@ class GetDistance(GetVector):
"""
vector = self.calc_vector(initial, terminal, pbc_box)
+
+ if self.norm is None:
+ return ops.norm(vector, None, self.axis, self.keepdims)
+
return self.norm(vector)
@@ -316,7 +327,6 @@ class GetDistanceShift(Cell):
# (C,2)
self.bonds = bonds
- self.norm = nn.Norm(-1)
# (B,C,A)
shape = (num_walkers, bonds.shape[-2], num_atoms)
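The `GetDistance` change above probes the installed MindSpore version through `inspect.signature` rather than a version string: `ops.norm` gained an `ord` parameter in 2.0.0-rc1, so its presence tells the class whether it can call the functional API or must fall back to `nn.Norm`. A condensed restatement of that probe, factored into a standalone helper (`make_norm` is a hypothetical name, not part of this patch):

    from inspect import signature
    from mindspore import nn, ops

    def make_norm(axis: int = -1, keepdims: bool = False):
        # MindSpore >= 2.0.0-rc1: ops.norm(input, ord, dim, keepdim) exists.
        if 'ord' in signature(ops.norm).parameters:
            return lambda x: ops.norm(x, None, axis, keepdims)
        # Older releases: fall back to the nn.Norm cell.
        return nn.Norm(axis, keepdims)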
diff --git a/MindSPONGE/mindsponge/python/function/units.py b/MindSPONGE/mindsponge/python/function/units.py
index d49b41c307add75dc45573d42d04fb7e893dbb7f..a18dfb9f5a1f589be22ea9bddffffa26958819da 100644
--- a/MindSPONGE/mindsponge/python/function/units.py
+++ b/MindSPONGE/mindsponge/python/function/units.py
@@ -27,6 +27,8 @@ Units
from typing import Union
import math
+from .functions import get_arguments
+
__all__ = [
'AVOGADRO_NUMBER',
'BOLTZMANN_CONSTANT',
@@ -46,6 +48,8 @@ __all__ = [
'length_convert',
'energy_convert',
'Units',
+ 'get_length',
+ 'get_energy',
'GLOBAL_UNITS',
'set_global_length_unit',
'set_global_energy_unit',
@@ -168,7 +172,8 @@ class Length:
``Ascend`` ``GPU`` ``CPU``
"""
- def __init__(self, value: float, unit: str = 'nm'):
+ def __init__(self, value: float, unit: str = 'nm', **kwargs):
+ self._kwargs = get_arguments(locals(), kwargs)
if isinstance(value, Length):
self.__value = value.value
self.__unit = value.unit
@@ -304,7 +309,8 @@ class Energy:
``Ascend`` ``GPU`` ``CPU``
"""
- def __init__(self, value: float, unit: str = 'kj/mol'):
+ def __init__(self, value: float, unit: str = 'kj/mol', **kwargs):
+ self._kwargs = get_arguments(locals(), kwargs)
if isinstance(value, Energy):
self.__value = value.value
self.__unit = value.unit
@@ -653,7 +659,9 @@ class Units:
def __init__(self,
length_unit: str = None,
energy_unit: str = None,
+ **kwargs,
):
+ self._kwargs = get_arguments(locals(), kwargs)
self.__length_unit = get_length_unit(length_unit)
self.__length_unit_name = get_length_unit_name(length_unit)
@@ -1004,8 +1012,7 @@ class Units:
return value * self.convert_energy_from(unit)
def convert_length_to(self, unit) -> float:
- """
- convert length to a specified units.
+ """returns a scale factor that converts the length to a specified unit.
Args:
unit (Union[str, Units, Length, float, int]): Length unit.
@@ -1016,8 +1023,7 @@ class Units:
return length_convert(self.__length_unit, unit)
def convert_energy_to(self, unit) -> float:
- """
- convert energy to a specified units.
+ """returns a scale factor that converts the energy to a specified unit.
Args:
unit (Union[str, Units, Energy, float, int]): Energy unit.
@@ -1028,7 +1034,7 @@ class Units:
return energy_convert(self.__energy_unit, unit)
def convert_length_from(self, unit) -> float:
- """convert length from a specified units.
+ """returns a scale factor that converts the length from a specified unit.
Args:
unit (Union[str, Units, Length, float, int]): Length unit.
@@ -1039,8 +1045,7 @@ class Units:
return length_convert(unit, self.__length_unit)
def convert_energy_from(self, unit) -> float:
- """
- convert energy from a specified units.
+ """returns a scale factor that converts the energy from a specified unit.
Args:
unit (Union[str, Units, Energy, float, int]): Energy unit.
@@ -1051,6 +1056,24 @@ class Units:
return energy_convert(unit, self.__energy_unit)
+def get_length(length: Union[Length, float], unit: Union[str, Units] = None) -> float:
+ """get Tensor of length in specific unit"""
+ if isinstance(length, dict):
+ length = Length(**length)
+ if isinstance(length, Length):
+ return length(unit)
+ return length
+
+
+def get_energy(energy: Union[Energy, float], unit: Union[str, Units] = None) -> float:
+ """get Tensor of energy in specific unit"""
+ if isinstance(energy, dict):
+ energy = Energy(**energy)
+ if isinstance(energy, Energy):
+ return energy(unit)
+ return energy
+
+
GLOBAL_UNITS = Units('nm', 'kj/mol')
r"""Global unints of MindSPONGE"""
diff --git a/MindSPONGE/mindsponge/python/optimizer/md.py b/MindSPONGE/mindsponge/python/optimizer/md.py
index b3b07e8e9889f869cc46a8bc758fee248b7f3f2e..161b10a5e158dac7b3b0b24129c59eac13330ac5 100644
--- a/MindSPONGE/mindsponge/python/optimizer/md.py
+++ b/MindSPONGE/mindsponge/python/optimizer/md.py
@@ -32,6 +32,7 @@ from . import Updater
from ..system import Molecule
from ..control.controller import Controller
from ..control import Integrator, Thermostat, Barostat, Constraint
+from ..function import get_arguments
class UpdaterMD(Updater):
@@ -94,6 +95,7 @@ class UpdaterMD(Updater):
velocity: Union[Tensor, ndarray, List[float]] = None,
weight_decay: float = 0.0,
loss_scale: float = 1.0,
+ **kwargs,
):
super().__init__(
@@ -104,6 +106,8 @@ class UpdaterMD(Updater):
weight_decay=weight_decay,
loss_scale=loss_scale,
)
+ self._kwargs = get_arguments(locals(), kwargs)
+ self._kwargs.pop('velocity')
self.integrator: Integrator = integrator
self.integrator.set_time_step(self.time_step)
diff --git a/MindSPONGE/mindsponge/python/optimizer/steepest.py b/MindSPONGE/mindsponge/python/optimizer/steepest.py
index bf12017711c82c3f8efad4c5b3701771ad848a70..07f19d4e555dfe49d11ab64f9832e4a1ac10dde0 100644
--- a/MindSPONGE/mindsponge/python/optimizer/steepest.py
+++ b/MindSPONGE/mindsponge/python/optimizer/steepest.py
@@ -1,41 +1,163 @@
"""
Optimizer used to get the minimum value of a given function.
"""
-import mindspore as ms
-from mindspore import nn, Parameter, Tensor
-from mindspore import numpy as msnp
+from typing import Union, List, Iterable
+from mindspore import Parameter, Tensor
+from mindspore.nn.optim.optimizer import Optimizer, opt_init_args_register
+from mindspore.ops import functional as F
+from mindspore.ops import composite as C
+from mindspore.common.api import jit
+from mindspore.nn.learning_rate_schedule import LearningRateSchedule
+from mindspore import _checkparam as validator
-class SteepestDescent(nn.Optimizer):
+
+_gd_opt = C.MultitypeFuncGraph("sd_opt")
+
+
+@_gd_opt.register("Tensor", "Tensor", "Tensor")
+def _gradient_descent(learning_rate, gradient, weight):
+ """Apply sgd optimizer to the weight parameter using Tensor."""
+ success = True
+ success = F.depend(success, F.assign_add(weight, -gradient * learning_rate))
+ return success
+
+
+@_gd_opt.register("Tensor", "Float32", "Tensor", "Tensor")
+def _gradient_descent_with_shift(learning_rate, shift, gradient, weight):
+ """Apply sgd optimizer to the weight parameter using Tensor."""
+ success = True
+ origin_shift = -gradient * learning_rate
+ success = F.depend(success, F.assign_add(weight, origin_shift.clip(-shift, shift)))
+ return success
+
+
+class SteepestDescent(Optimizer):
"""
- The steepest descent (gradient descent) optimizer with growing learning rate.
+ Implements the steepest descent (gradient descent) algorithm.
+
+ Note:
+ If parameters are not grouped, the `weight_decay` in optimizer will be applied on the network parameters without
+ 'beta' or 'gamma' in their names. Users can group parameters to change the strategy of decaying weight. When
+ parameters are grouped, each group can set `weight_decay`. If not, the `weight_decay` in optimizer will be
+ applied.
Args:
- crd(tuple): Usually a tuple of parameters is given and the first element is coordinates.
- learning_rate(float): A factor of each optimize step size.
- factor(float): A growing factor of learning rate.
- nonh_mask(Tensor): The mask of atoms which are not Hydrogen.
- max_shift(float): The max step size each atom can move.
+ params (Union[list[Parameter], list[dict]]): Must be list of `Parameter` or list of `dict`. When the
+ `params` is a list of `dict`, the strings "params", "lr", "grad_centralization" and
+ "order_params" are the keys that can be parsed.
+
+ - params: Required. Parameters in current group. The value must be a list of `Parameter`.
+
+ - lr: Optional. If "lr" in the keys, the value of corresponding learning rate will be used.
+ If not, the `learning_rate` in optimizer will be used. Fixed and dynamic learning rate are supported.
+
+ - weight_decay: Using different `weight_decay` by grouping parameters is currently not supported.
+
+ - grad_centralization: Optional. Must be Boolean. If "grad_centralization" is in the keys, the set value
+ will be used. If not, the `grad_centralization` is False by default. This configuration only works on the
+ convolution layer.
+
+ - order_params: Optional. When parameters is grouped, this usually is used to maintain the order of
+ parameters that appeared in the network to improve performance. The value should be parameters whose
+ order will be followed in optimizer.
+ If `order_params` in the keys, other keys will be ignored and the element of 'order_params' must be in
+ one group of `params`.
+
+ learning_rate (Union[float, int, Tensor, Iterable, LearningRateSchedule]):
+
+ - float: The fixed learning rate value. Must be equal to or greater than 0.
+
+ - int: The fixed learning rate value. Must be equal to or greater than 0. It will be converted to float.
+
+ - Tensor: Its value should be a scalar or a 1-D vector. For scalar, fixed learning rate will be applied.
+ For vector, learning rate is dynamic, then the i-th step will take the i-th value as the learning rate.
+
+ - Iterable: Learning rate is dynamic. The i-th step will take the i-th value as the learning rate.
+
+ - LearningRateSchedule: Learning rate is dynamic. During training, the optimizer calls the instance of
+ LearningRateSchedule with step as the input to get the learning rate of current step.
+
+ weight_decay (Union[float, int]): An int or a floating point value for the weight decay.
+ It must be equal to or greater than 0.
+ If the type of `weight_decay` input is int, it will be converted to float. Default: 0.0.
+
+ loss_scale (float): A floating point value for the loss scale. It must be greater than 0. If the
+ type of `loss_scale` input is int, it will be converted to float. In general, use the default value. Only
+ when `FixedLossScaleManager` is used for training and the `drop_overflow_update` in
+ `FixedLossScaleManager` is set to False, this value needs to be the same as the `loss_scale` in
+ `FixedLossScaleManager`. Refer to class :class:`mindspore.amp.FixedLossScaleManager` for more details.
+ Default: 1.0.
+
+ max_shift (float): A floating point value for the max shift. It must be greater than 0. It bounds the
+ shift applied to each parameter in one optimization step. If `max_shift` is None, the shift is not
+ clipped; if `max_shift` is a float, the shift is clipped to the range [-max_shift, max_shift].
+ Default: None
+
+ Raises:
+ TypeError: If `learning_rate` is not one of int, float, Tensor, Iterable, LearningRateSchedule.
+ TypeError: If element of `parameters` is neither Parameter nor dict.
+ TypeError: If `loss_scale` is not a float.
+ TypeError: If `weight_decay` is neither float nor int.
+ ValueError: If `loss_scale` is less than or equal to 0.
+ ValueError: If `weight_decay` is less than 0.
+ ValueError: If `learning_rate` is a Tensor, but the dimension of tensor is greater than 1.
Supported Platforms:
- ``Ascend`` ``GPU``
+ ``Ascend`` ``GPU`` ``CPU``
+
+ Examples:
+ >>> from mindsponge import Sponge, Molecule, ForceField
+ >>> from mindsponge.optimizer import SteepestDescent
+ >>>
+ >>> system = Molecule(template='water.tip3p.yaml')
+ >>> potential = ForceField(system, parameters='SPCE')
+ >>> optim = SteepestDescent(params=system.trainable_params(), learning_rate=1e-7)
+ >>>
+ >>> md = Sponge(system, potential, updater)
+ >>> md.run(1000)
+
"""
- def __init__(self, crd, learning_rate=1e-03, factor=1.001, nonh_mask=None, max_shift=1.0):
- super(SteepestDescent, self).__init__(learning_rate, crd)
- self.crd = crd[0]
- self.learning_rate = Parameter(Tensor(learning_rate, ms.float32))
- self.factor = Parameter(Tensor(factor, ms.float32))
- if nonh_mask is not None:
- self.nonh_mask = nonh_mask
+ @opt_init_args_register
+ def __init__(self,
+ params: Union[List[Parameter], List[dict]],
+ learning_rate: Union[float, int, Tensor, Iterable, LearningRateSchedule] = 1e-03,
+ weight_decay: Union[float, int] = 0.0,
+ loss_scale: float = 1.0,
+ max_shift: float = None
+ ):
+ super().__init__(
+ parameters=params,
+ learning_rate=learning_rate,
+ weight_decay=weight_decay,
+ loss_scale=loss_scale,
+ )
+ if max_shift is None:
+ self.max_shift = None
else:
- self.nonh_mask = msnp.ones((1, self.crd.shape[-2], 1))
- self.max_shift = Parameter(Tensor(max_shift, ms.float32))
+ if isinstance(max_shift, int):
+ max_shift = float(max_shift)
+ validator.check_value_type("max_shift", max_shift, [float], self.cls_name)
+ validator.check_positive_float(max_shift, "max_shift", self.cls_name)
+ self.max_shift = max_shift
+ @jit
def construct(self, gradients):
- shift = self.learning_rate*gradients[0]*self.nonh_mask
- shift = msnp.where(shift > self.max_shift, self.max_shift, shift)
- shift = msnp.where(shift < -self.max_shift, -self.max_shift, shift)
- self.crd -= shift
- self.learning_rate *= self.factor
- return self.crd
+ """update the parameters by the gradients"""
+ params = self._parameters
+ gradients = self.flatten_gradients(gradients)
+ gradients = self.gradients_centralization(gradients)
+ gradients = self.scale_grad(gradients)
+ lr = self.get_lr()
+ if self.is_group_lr:
+ if self.max_shift is not None:
+ success = self.hyper_map_reverse(F.partial(_gd_opt), lr, self.max_shift, gradients, params)
+ else:
+ success = self.hyper_map_reverse(F.partial(_gd_opt), lr, gradients, params)
+ elif self.max_shift is not None:
+ success = self.hyper_map_reverse(F.partial(_gd_opt, lr, self.max_shift), gradients, params)
+ else:
+ success = self.hyper_map_reverse(F.partial(_gd_opt, lr), gradients, params)
+ return success
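For reference, the update rule registered in `_gd_opt` above amounts to a plain steepest-descent step whose per-element shift is optionally clipped; a NumPy restatement of the same arithmetic (illustration only, not MindSpore graph code):

    import numpy as np

    def steepest_descent_step(weight, gradient, learning_rate, max_shift=None):
        # shift = -lr * grad, clipped elementwise to [-max_shift, max_shift] if given
        shift = -learning_rate * gradient
        if max_shift is not None:
            shift = np.clip(shift, -max_shift, max_shift)
        return weight + shift

    w = np.array([0.0, 1.0, 2.0])
    g = np.array([100.0, -0.5, 3.0])
    print(steepest_descent_step(w, g, learning_rate=1e-2, max_shift=0.1))
    # -> [-0.1, 1.005, 1.97]; only the first element's shift hits the clip bound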
diff --git a/MindSPONGE/mindsponge/python/optimizer/updater.py b/MindSPONGE/mindsponge/python/optimizer/updater.py
index 72780dbb0ddfca962fd3dac17ba62e928dff692a..3550e4a5a86f76745c10d60ca5bd863d85171fa7 100644
--- a/MindSPONGE/mindsponge/python/optimizer/updater.py
+++ b/MindSPONGE/mindsponge/python/optimizer/updater.py
@@ -37,53 +37,47 @@ from mindspore.common.initializer import initializer
from ..system import Molecule
from ..control import Controller
-from ..function import get_ms_array
+from ..function import get_ms_array, get_arguments
from ..function import functions as func
class Updater(Optimizer):
- r"""Base class of the MindSPONGE updater, which is a special subclass of the `Optimizer` in MindSpore.
-
- The `Updater` updates the atomic coordinates of the simulation system. The updating of atomic coordinates
- requires atomic forces and atomic velocities, where the force is passed from outside and the velocity is the
- parameter of the `Updater` itself. And in the case of periodic boundary conditions (PBC), the `Updater`
- could also update the size of the PBC box by the virial of the simulation system.
-
- The "Updater" controls the values of seven variables during the simulation through a series of `Controller`:
- coordinates, velocity, force, energy, kinetics, virial and pbc_box. If more than one `Controller` is passed in,
- they will work in sequence.
+ r"""
+ Base class of the MindSPONGE updater, which is a special subclass of the `Optimizer` in MindSpore.
+ The `Updater` updates the atomic coordinates of the simulation system. The updating of atomic coordinates
+ requires atomic forces and atomic velocities, where the force is passed from outside and the velocity is the
+ parameter of the `Updater` itself. And in the case of periodic boundary conditions (PBC), the `Updater`
+ could also update the size of the PBC box by the virial of the simulation system.
+ The "Updater" controls the values of seven variables during the simulation through a series of `Controller`:
+ coordinates, velocity, force, energy, kinetics, virial and pbc_box. If more than one `Controller` is passed in,
+ they will work in sequence.
Args:
-
- system (Molecule): Simulation system.
-
- controller (Union[Controller, List[Controller]]):
- Controller or list of controllers to control the seven variables (coordinate,
- velocity, force, energy, kinetics, virial and pbc_box) of the simulation system.
-
- time_step (float): Time step. Defulat: 1e-3
-
- velocity (Union[Tensor, ndarray, List[float]]):
- Array of atomic velocity. The shape of array is `(A, D)` or `(B, A, D)`, and
- the data type is float. Default: None
-
- weight_decay (float): An value for the weight decay. Default: 0
-
- loss_scale (float): A value for the loss scale. Default: 1
-
+ system(Molecule): Simulation system.
+ controller(Union[Controller, List[Controller]]): Controller or list of controllers to control the seven
+ variables (coordinate, velocity, force, energy, kinetics,
+ virial and pbc_box) of the simulation system. Default: None
+        time_step(float): Time step. Default: 1e-3
+ velocity(Union[Tensor, ndarray, List[float]]): Array of atomic velocity. The shape of array is `(A, D)`
+ or `(B, A, D)`, and the data type is float. Default: None
+        weight_decay(float): A value for the weight decay. Default: 0.0
+ loss_scale(float): A value for the loss scale. Default: 1.0
+
+ Inputs:
+ - **energy** (Tensor) - Energy of the system. Tensor of shape `(B, A, D)`. Data type is float.
+ - **force** (Tensor) - Force of the system. Tensor of shape `(B, A, D)`. Data type is float.
+ - **virial** (Tensor) - Virial of the system. Tensor of shape `(B, A, D)`. Data type is float. Default: None
+
+ Outputs:
+        bool, whether the current optimization step is successfully finished and moved to the next step.
Supported Platforms:
-
``Ascend`` ``GPU``
Symbols:
-
B: Batchsize, i.e. number of walkers in simulation
-
A: Number of atoms.
-
D: Spatial dimension of the simulation system. Usually is 3.
-
"""
@opt_init_args_register
def __init__(self,
@@ -93,6 +87,7 @@ class Updater(Optimizer):
velocity: Union[Tensor, ndarray, List[float]] = None,
weight_decay: float = 0.0,
loss_scale: float = 1.0,
+ **kwargs
):
super().__init__(
@@ -101,6 +96,8 @@ class Updater(Optimizer):
weight_decay=weight_decay,
loss_scale=loss_scale,
)
+ self._kwargs = get_arguments(locals(), kwargs)
+ self._kwargs.pop('velocity')
self.time_step = Tensor(time_step, ms.float32)
@@ -135,9 +132,8 @@ class Updater(Optimizer):
self.num_constraints = 0
self.num_controller = 0
- if controller is None:
- self.controller = None
- else:
+ self.controller: List[Controller] = None
+ if controller is not None:
if isinstance(controller, Controller):
self.num_controller = 1
controller = [controller]
@@ -183,20 +179,42 @@ class Updater(Optimizer):
@property
def boltzmann(self) -> float:
+ """
+ Boltzmann constant in current unit.
+
+ Returns:
+ float, Boltzmann constant in current unit.
+ """
return self.units.boltzmann
@property
def press_unit_scale(self) -> float:
+ """
+ Reference value of pressure.
+
+ Returns:
+ float, reference value of pressure.
+ """
return self.units.pressure_ref
def set_step(self, step: int = 0):
- """set time step"""
+ """
+ Set current step of the system.
+
+ Args:
+ step(int): Current step of the system. Default: 0
+ """
step = Tensor(step, ms.int32)
F.depend(True, F.assign(self.step, step))
return self
def set_degrees_of_freedom(self, dofs: int):
- """set degrees of freedom (DOFs)"""
+ """
+ Set degrees of freedom (DOFs)
+
+ Args:
+ dofs(int): Degrees of freedom.
+ """
self.degrees_of_freedom = func.get_integer(dofs)
self.num_constraints = self.sys_dofs - self.degrees_of_freedom
for i in range(self.num_controller):
@@ -204,51 +222,127 @@ class Updater(Optimizer):
return self
def update_coordinate(self, coordinate: Tensor, success: bool = True) -> bool:
- """update the parameters of coordinate"""
+ """
+ Update the parameters of coordinate
+
+ Args:
+ coordinate(Tensor): Tensor of atomic coordinates. Data type is float.
+ success(bool): Whether to update the coordinate. Default: True
+
+ Returns:
+            bool, whether the coordinate is successfully updated.
+ """
return F.depend(success, F.assign(self.coordinate, coordinate))
def update_pbc_box(self, pbc_box: Tensor, success: bool = True) -> bool:
- """update the parameters of PBC box"""
+ """
+ Update the parameters of PBC box.
+
+ Args:
+ pbc_box(Tensor): Tensor of PBC box. Data type is float.
+ success(bool): Whether to update the pbc_box. Default: True
+
+ Returns:
+            bool, whether the parameters of the PBC box are successfully updated.
+ """
if self.pbc_box is None:
return success
return F.depend(success, F.assign(self.pbc_box, pbc_box))
def update_velocity(self, velocity: Tensor, success: bool = True) -> bool:
- """update the parameters of velocity"""
+ """
+ Update the parameters of velocity.
+
+ Args:
+ velocity(Tensor): Tensor of atomic velocities. Data type is float.
+ success(bool): Whether to update the velocities. Default: True
+
+ Returns:
+            bool, whether the parameters of atomic velocities are successfully updated.
+ """
return F.depend(success, F.assign(self.velocity, velocity))
def update_kinetics(self, kinetics: Tensor, success: bool = True) -> bool:
- """update the parameters of kinects"""
+ """
+ Update the parameters of kinetics.
+
+ Args:
+ kinetics(Tensor): Tensor of kinetics. Data type is float.
+ success(bool): Whether to update the kinetics. Default: True
+
+ Returns:
+            bool, whether the parameters of kinetics are successfully updated.
+ """
if self.kinetics is None:
return success
return F.depend(success, F.assign(self.kinetics, kinetics))
def update_temperature(self, temperature: Tensor, success: bool = True) -> bool:
- """update the parameters of temperature"""
+ """
+ Update the parameters of temperature.
+
+ Args:
+ temperature(Tensor): Tensor of temperature. Data type is float.
+ success(bool): Whether to update the temperature. Default: True
+
+ Returns:
+            bool, whether the parameters of temperature are successfully updated.
+ """
if self.temperature is None:
return success
return F.depend(success, F.assign(self.temperature, temperature))
def update_virial(self, virial: Tensor, success: bool = True) -> bool:
- """update the parameters of virial"""
+ """
+ Update the parameters of virial.
+
+ Args:
+ virial(Tensor): Tensor of virial. Data type is float.
+ success(bool): Whether to update the virial. Default: True
+
+ Returns:
+            bool, whether the parameters of virial are successfully updated.
+ """
if self.pbc_box is None:
return success
return F.depend(success, F.assign(self.virial, virial))
def update_pressure(self, pressure: Tensor, success: bool = True) -> bool:
- """update the parameters of pressure"""
+ """
+ Update the parameters of pressure.
+
+ Args:
+ pressure(Tensor): Tensor of pressure. Data type is float.
+ success(bool): Whether to update the pressure. Default: True
+
+ Returns:
+            bool, whether the parameters of pressure are successfully updated.
+ """
if self.pbc_box is None:
return success
return F.depend(success, F.assign(self.pressure, pressure))
def get_velocity(self) -> Tensor:
- """get velocity"""
+ """
+ Get velocity.
+
+ Returns:
+ Tensor, atom velocities of the system.
+ """
if self.velocity is None:
return None
return self.identity(self.velocity)
def get_kinetics(self, velocity: Tensor) -> Tensor:
- """get kinectics"""
+ """
+        Get kinetics.
+
+ Args:
+ velocity(Tensor): Tensor of atom velocities. Data type is float.
+
+ Returns:
+            Tensor, the kinetics of the system.
+ """
# (B,A,D)
kinetics = 0.5 * self._atom_mass * velocity**2
# (B,D) <- (B,A,D)
@@ -256,13 +350,31 @@ class Updater(Optimizer):
return kinetics * self.kinetic_unit_scale
def get_temperature(self, kinetics: Tensor = None) -> Tensor:
- """get temperature"""
+ """
+ Get temperature.
+
+ Args:
+ kinetics(Tensor): Tensor of kinetics. Data type is float. Default: None
+
+ Returns:
+ Tensor, the temperature of the system.
+ """
# (B) <- (B,D)
kinetics = F.reduce_sum(kinetics, -1)
return 2 * kinetics / self.degrees_of_freedom / self.boltzmann
def get_pressure(self, kinetics: Tensor, virial: Tensor, pbc_box: Tensor) -> Tensor:
- """get pressure"""
+ """
+ Get pressure.
+
+ Args:
+ kinetics(Tensor): Tensor of kinetics. Data type is float.
+ virial(Tensor): Tensor of virial. Data type is float.
+ pbc_box(Tensor): Tensor of pbc_box. Data type is float.
+
+ Returns:
+ Tensor, the pressure of the system.
+ """
if self.pbc_box is None:
return None
# (B,D) = ((B,D) - (B, D)) / (B,1)
@@ -271,15 +383,39 @@ class Updater(Optimizer):
return pressure * self.press_unit_scale
def get_dt(self):
- """get time step"""
+ """
+        Get the time step (learning rate) of the current step.
+
+ Returns:
+            float, the time step (learning rate) of the current step.
+ """
return self.get_lr()
def next_step(self, success: bool = True) -> bool:
- """finish the current optimization step and move to next step"""
+ """
+ Finish the current optimization step and move to next step.
+
+ Args:
+ success(bool): Whether to finish the current optimization step and move to next step. Default: True
+
+ Returns:
+            bool, whether the current optimization step is successfully finished and moved to the next step.
+ """
return F.depend(success, F.assign(self.step, self.step+1))
def decay_and_scale_grad(self, force: Tensor, virial: Tensor = None) -> Tuple[Tensor, Tensor]:
- """do weight decay and gradient scale for force and virial"""
+ """
+ Do weight decay and gradient scale for force and virial.
+
+ Args:
+ force(Tensor): Tensor of force. Data type is float.
+ virial(Tensor): Tensor of virial. Data type is float. Default: None
+
+ Returns:
+ - Tensor, Tensor of force after weight decay and gradient scale.
+ - Tensor, Tensor of virial after weight decay and gradient scale.
+ If pbc_box is None, the output virial is the same as input.
+ """
if self.exec_weight_decay or self.need_scale:
if self.pbc_box is None:
gradients = (force,)
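
Note: a back-of-the-envelope check of the relation documented in `get_temperature`, T = 2 E_k / (DOFs * k_B), with made-up numbers; the Boltzmann constant below is the kJ·mol⁻¹·K⁻¹ value and is only an illustrative unit choice, not necessarily the unit system configured on the `Updater`:

    import numpy as np

    boltzmann = 0.00831446            # kJ/(mol*K); illustrative unit choice
    kinetics = np.array([3.7])        # kinetic energy summed over D, per walker (kJ/mol)
    degrees_of_freedom = 3 * 100 - 3  # e.g. 100 atoms with centre-of-mass motion removed
    temperature = 2 * kinetics / degrees_of_freedom / boltzmann
    print(temperature)                # ~[3.0] K for these numbers
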
diff --git a/MindSPONGE/mindsponge/python/partition/distance.py b/MindSPONGE/mindsponge/python/partition/distance.py
index 7a15172c2d74dde63a13346cb92cd22dda6eb846..a765378d07489f0493ec6eaf5c0e73b61a0d62b3 100644
--- a/MindSPONGE/mindsponge/python/partition/distance.py
+++ b/MindSPONGE/mindsponge/python/partition/distance.py
@@ -151,7 +151,7 @@ class DistanceNeighbours(Cell):
distances = self.get_distance(F.expand_dims(coordinate, -2), F.expand_dims(coordinate, -3), pbc_box)
num_neighbours = self.calc_max_neighbours(distances, self.scaled_cutoff)
num_neighbours = F.ceil(num_neighbours * scale_factor)
- self.num_neighbours = get_integer(msnp.minimum(num_neighbours, coordinate.shape[-2] - 1))
+ self.num_neighbours = get_integer(F.minimum(num_neighbours, coordinate.shape[-2] - 1))
F.assign(self.max_neighbours, self.num_neighbours)
return self
diff --git a/MindSPONGE/mindsponge/python/partition/grids.py b/MindSPONGE/mindsponge/python/partition/grids.py
index b00471562bc3cdd007bcd1bc1820e72ca4912118..849ce8c9148b605db618d94323cdbed5bbfa3b8d 100644
--- a/MindSPONGE/mindsponge/python/partition/grids.py
+++ b/MindSPONGE/mindsponge/python/partition/grids.py
@@ -207,7 +207,13 @@ class GridNeighbours(Cell):
if cell_capacity is None:
# (B, 1)
- _, max_num_in_cell = scipy.stats.mode(atom_grid_idx.asnumpy(), axis=1)
+ try:
+ # SciPy >= 1.9
+ # pylint: disable=unexpected-keyword-arg
+ _, max_num_in_cell = scipy.stats.mode(atom_grid_idx.asnumpy(), axis=1, keepdims=True)
+ except TypeError:
+ # SciPy < 1.9
+ _, max_num_in_cell = scipy.stats.mode(atom_grid_idx.asnumpy(), axis=1)
max_num_in_cell = get_integer(np.max(max_num_in_cell))
# C
cell_capacity = get_integer(msnp.ceil(max_num_in_cell*self.cell_cap_scale))
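
Note: the `keepdims` fallback added above can be reused wherever `scipy.stats.mode` is reduced along an axis; a standalone sketch of the same feature-detection pattern (the helper name is illustrative):

    import numpy as np
    import scipy.stats

    def mode_counts(idx: np.ndarray) -> np.ndarray:
        """Per-row count of the modal value, tolerating both SciPy APIs."""
        try:
            # SciPy >= 1.9: keepdims must be passed to retain the reduced axis
            _, counts = scipy.stats.mode(idx, axis=1, keepdims=True)
        except TypeError:
            # SciPy < 1.9: the reduced axis is kept by default
            _, counts = scipy.stats.mode(idx, axis=1)
        return counts

    print(mode_counts(np.array([[0, 0, 1], [2, 2, 2]])))  # counts with shape (2, 1): [[2], [3]]
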
diff --git a/MindSPONGE/mindsponge/python/partition/index.py b/MindSPONGE/mindsponge/python/partition/index.py
index 5dfe304602fc1e66edf6f1f9b14f215a5e5c549d..856d444c079dcb675d6aef955ffc242422eab1b4 100644
--- a/MindSPONGE/mindsponge/python/partition/index.py
+++ b/MindSPONGE/mindsponge/python/partition/index.py
@@ -24,6 +24,8 @@
Collective variables that accept index
"""
+from inspect import signature
+
import mindspore as ms
from mindspore import ops
from mindspore.ops import functional as F
@@ -33,7 +35,7 @@ from mindspore.common import Tensor
from mindspore import numpy as msnp
from ..function import functions as func
-from ..function.operations import GetVector
+from ..function import GetVector, get_integer
__all__ = [
'IndexColvar',
@@ -108,9 +110,14 @@ class IndexDistances(IndexColvar):
super().__init__(use_pbc=use_pbc)
- self.norm_last_dim = nn.Norm(-1, keepdims)
+ self.keepdims = keepdims
self.large_dis = Tensor(large_dis, ms.float32)
+ self.norm_last_dim = None
+ # MindSpore < 2.0.0-rc1
+ if 'ord' not in signature(ops.norm).parameters.keys():
+ self.norm_last_dim = nn.Norm(-1, self.keepdims)
+
def construct(self, coordinate: Tensor, index: Tensor, mask: Tensor = None, pbc_box: Tensor = None):
r"""Compute distances between atoms according to index.
@@ -137,9 +144,9 @@ class IndexDistances(IndexColvar):
"""
- # (B,A,1,D) <- (B,A,D)
+ # (B, A, 1, D) <- (B, A, D)
atoms = F.expand_dims(coordinate, -2)
- # (B,A,N,D) <- (B,A,D)
+ # (B, A, N, D) <- (B, A, D)
neighbours = func.gather_vector(coordinate, index)
vectors = self.get_vector(atoms, neighbours, pbc_box)
@@ -147,14 +154,96 @@ class IndexDistances(IndexColvar):
# to prevent them from becoming zero values after Norm operation,
# which could lead to auto-differentiation errors
if mask is not None:
- # (B,A,N,D) = (B,A,N,D) + (B,A,N,1)
+ # (B, A, N, D) = (B, A, N, D) + (B, A, N, 1)
large_dis = msnp.broadcast_to(self.large_dis, mask.shape)
vectors += F.expand_dims(F.select(mask, F.zeros_like(large_dis), large_dis), -1)
- # (B,A,N) = (B,A,N,D)
+ # (B, A, N) <- (B, A, N, D)
+ if self.norm_last_dim is None:
+ return ops.norm(vectors, None, -1, self.keepdims)
+
return self.norm_last_dim(vectors)
+class Vector2Distance(Cell):
+    r"""Calculate the norm (distance) of vectors.
+
+    Args:
+        axis (int): Axis along which the norm is calculated. Default: -1
+
+        large_dis (float): A large value added to the vectors whose mask is False, to prevent them
+            from becoming zero values after the norm operation, which could lead to
+            auto-differentiation errors. Default: 100
+
+        keepdims (bool): If this is `True`, the reduced axis will be left in the result as a
+            dimension with size one. Default: False
+
+ Supported Platforms:
+
+ ``Ascend`` ``GPU``
+
+ """
+ def __init__(self,
+ axis: int = -1,
+ large_dis: float = 100,
+ keepdims: bool = False,
+ ):
+        super().__init__()
+ self.axis = get_integer(axis)
+ self.keepdims = keepdims
+ self.large_dis = Tensor(large_dis, ms.float32)
+
+ self.norm_last_dim = None
+ # MindSpore < 2.0.0-rc1
+ if 'ord' not in signature(ops.norm).parameters.keys():
+ self.norm_last_dim = nn.Norm(self.axis, self.keepdims)
+
+ def construct(self, vector: Tensor, mask: Tensor = None):
+        r"""Compute the norm (distance) of the input vectors.
+
+        Args:
+            vector (Tensor): Tensor of shape (B, ..., D). Data type is float.
+                Vectors to be normed.
+            mask (Tensor): Tensor of shape (B, ...). Data type is bool.
+                Mask for the vectors. Default: None
+
+        Returns:
+            distance (Tensor): Tensor of shape (B, ...), or (B, ..., 1) if `keepdims` is True.
+                Data type is float.
+
+        Symbols:
+
+            B: Batchsize, i.e. number of simulation walker.
+            D: Dimension of position coordinates.
+
+ """
+
+ # Add a non-zero value to the vectors whose mask value is False
+ # to prevent them from becoming zero values after Norm operation,
+ # which could lead to auto-differentiation errors
+ if mask is not None:
+ # (B, ...)
+ large_dis = msnp.broadcast_to(self.large_dis, mask.shape)
+ vector_shift = F.select(mask, F.zeros_like(large_dis), large_dis)
+ # (B, ..., 1) <- (B, ...)
+ vector_shift = F.expand_dims(vector_shift, self.axis)
+ # (B, ..., D) = (B, ..., D) + (B, .., 1)
+ vector += vector_shift
+
+ # (B, ...) <- (B, ..., D) OR (B, ..., 1) <- (B, ..., D)
+ if self.norm_last_dim is None:
+ distance = ops.norm(vector, None, self.axis, self.keepdims)
+ else:
+ distance = self.norm_last_dim(vector)
+
+ if mask is not None:
+ if self.keepdims:
+ mask = F.expand_dims(mask, self.axis)
+ # (B, ...) * (B, ...) OR (B, ..., 1) * (B, ..., 1)
+ distance *= mask
+
+ return distance
+
+
class IndexVectors(IndexColvar):
r"""Get vectors by index
diff --git a/MindSPONGE/mindsponge/python/partition/neighbourlist.py b/MindSPONGE/mindsponge/python/partition/neighbourlist.py
index 8d7941885f553cc764cce11247418ad47d7410a2..3a93c1a7c8c79741ec2a1f021aca89b0bc526791 100644
--- a/MindSPONGE/mindsponge/python/partition/neighbourlist.py
+++ b/MindSPONGE/mindsponge/python/partition/neighbourlist.py
@@ -24,6 +24,8 @@
Neighbour list
"""
+from inspect import signature
+
from typing import Tuple
import mindspore as ms
import mindspore.numpy as msnp
@@ -201,9 +203,13 @@ class NeighbourList(Cell):
self.neighbour_mask = Parameter(mask, name='neighbour_mask', requires_grad=False)
self.get_vector = GetVector(use_pbc)
- self.norm_last_dim = nn.Norm(-1, False)
self.identity = ops.Identity()
+ self.norm_last_dim = None
+ # MindSpore < 2.0.0-rc1
+ if 'ord' not in signature(ops.norm).parameters.keys():
+ self.norm_last_dim = nn.Norm(-1, False)
+
@property
def pace(self) -> int:
r"""Update frequency for neighbour list
@@ -343,9 +349,9 @@ class NeighbourList(Cell):
Returns:
neigh_idx (Tensor): Tensor of shape `(B, A, N)`. Data type is int.
Index of neighbouring atoms of each atoms in system.
- neigh_pos (Tensor): Tensor of shape `(B, A, N, D)`. Data type is float.
- Position of neighbouring atoms.
- neigh_dis (Tensor): Tensor of shape `(B, A, N, D)`. Data type is float.
+ neigh_vec (Tensor): Tensor of shape `(B, A, N, D)`. Data type is float.
+ Vectors from central atom to neighbouring atoms.
+ neigh_dis (Tensor): Tensor of shape `(B, A, N)`. Data type is float.
Distance between center atoms and neighbouring atoms.
neigh_mask (Tensor): Tensor of shape `(B, A, N)`. Data type is bool.
Mask for neighbour list `neigh_idx`.
@@ -360,24 +366,27 @@ class NeighbourList(Cell):
neigh_idx, neigh_mask = self.get_neighbour_list()
- # (B,A,1,D) <- (B,A,D)
+ # (B, A, 1, D) <- (B, A, D)
center_pos = F.expand_dims(coordinate, -2)
- # (B,A,N,D) <- (B,A,D)
- neigh_pos = gather_vector(coordinate, neigh_idx)
+ # (B, A, N, D) <- (B, A, D)
+ neigh_vec = gather_vector(coordinate, neigh_idx)
- neigh_vec = self.get_vector(center_pos, neigh_pos, pbc_box)
+ neigh_vec = self.get_vector(center_pos, neigh_vec, pbc_box)
# Add a non-zero value to the neighbour_vector whose mask value is False
# to prevent them from becoming zero values after Norm operation,
# which could lead to auto-differentiation errors
if neigh_mask is not None:
- # (B,A,N)
+ # (B, A, N)
large_dis = msnp.broadcast_to(self.large_dis, neigh_mask.shape)
large_dis = F.select(neigh_mask, F.zeros_like(large_dis), large_dis)
- # (B,A,N,D) = (B,A,N,D) + (B,A,N,1)
+ # (B, A, N, D) = (B, A, N, D) + (B, A, N, 1)
neigh_vec += F.expand_dims(large_dis, -1)
- # (B,A,N) = (B,A,N,D)
- neigh_dis = self.norm_last_dim(neigh_vec)
+ # (B, A, N) <- (B, A, N, D)
+ if self.norm_last_dim is None:
+ neigh_dis = ops.norm(neigh_vec, None, -1)
+ else:
+ neigh_dis = self.norm_last_dim(neigh_vec)
- return neigh_idx, neigh_pos, neigh_dis, neigh_mask
+ return neigh_idx, neigh_vec, neigh_dis, neigh_mask
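
Note: the signature probe used above (and in `index.py`) can be checked in isolation; a short sketch, assuming MindSpore is installed, of how the two code paths compute the same last-axis norm:

    from inspect import signature
    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, nn, ops

    vec = Tensor(np.array([[3.0, 4.0, 0.0]]), ms.float32)
    if 'ord' in signature(ops.norm).parameters:   # MindSpore >= 2.0.0-rc1
        dis = ops.norm(vec, None, -1)
    else:                                         # older MindSpore
        dis = nn.Norm(-1, False)(vec)
    print(dis)                                    # [5.]
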
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/__init__.py b/MindSPONGE/mindsponge/python/pipeline/models/__init__.py
index 32af20b3559ebd0d80a801dbc58490f7479df722..4dc8b075151fddc5277fed969f375478a5a99879 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/__init__.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/__init__.py
@@ -26,6 +26,8 @@ from .deepdr import DeepDR, DeepDRDataSet, deepdr_configuration
from .deepfri import DeepFri, DeepFriDataSet, deepfri_configuration
from .esm_if1 import ESM, ESMDataSet, esm_configuration
from .esm2 import ESM2, ESM2DataSet, esm2_configuration
+from .graphdta import GraphDTA, GraphDTADataSet, graphdta_configuration
+from .grover import Grover, GroverDataSet, grover_configuration
from .kgnn import KGNN, KGNNDataSet, kgnn_configuration
from .megaassessment import MEGAAssessment, MEGAAssessmentDataSet, megaassessment_configuration
from .megaevogen import MEGAEvoGen, MEGAEvoGenDataSet, megaevogen_configuration
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/__init__.py b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/__init__.py
index eeda7e6ab3edc6a6c78764f849baa1089035d1b2..105ab913bf973d2ec4c00fd3a93146c03f0c8dce 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/__init__.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/__init__.py
@@ -1,12 +1,4 @@
-# Copyright 2023 @ Shenzhen Bay Laboratory &
-# Peking University &
-# Huawei Technologies Co., Ltd
-#
-# This code is a part of MindSPONGE:
-# MindSpore Simulation Package tOwards Next Generation molecular modelling.
-#
-# MindSPONGE is open-source software based on the AI-framework:
-# MindSpore (https://www.mindspore.cn/)
+# Copyright 2022-2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign.py b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign.py
index 5e96511be50da8597ef9e892b248c656499489d7..3813bf7655a36406559b5833388f2cf4037a33c2 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign.py
@@ -1,12 +1,4 @@
-# Copyright 2023 @ Shenzhen Bay Laboratory &
-# Peking University &
-# Huawei Technologies Co., Ltd
-#
-# This code is a part of MindSPONGE:
-# MindSpore Simulation Package tOwards Next Generation molecular modelling.
-#
-# MindSPONGE is open-source software based on the AI-framework:
-# MindSpore (https://www.mindspore.cn/)
+# Copyright 2022-2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,15 +15,15 @@
"""colabdesign"""
import numpy as np
-from mindspore import Parameter
-from mindspore import Tensor, load_checkpoint
import mindspore as ms
+from mindspore import Parameter
+from mindspore import Tensor
from mindspore import jit, context
-from .nn_arch import Colabdesign
-from ..model import Model
from .module.design_wrapcell import TrainOneStepCell, WithLossCell
from .module.utils import get_weights, get_lr, get_opt
+from .nn_arch import Colabdesign
+from ..model import Model
class COLABDESIGN(Model):
@@ -58,25 +50,38 @@ class COLABDESIGN(Model):
self.config = config
self.use_jit = self.config.use_jit
self.checkpoint_url = \
- 'https://download.mindspore.cn/mindscience/mindsponge/Multimer/checkpoint/Multimer_Model_1.ckpt'
+ 'https://download.mindspore.cn/mindscience/mindsponge/ColabDesign/checkpoint/ColabDesign.ckpt'
self.checkpoint_path = "./colabdesign.ckpt"
seq_vector = 0.01 * np.random.normal(0, 1, size=(1, 100, 20))
- self.network = Colabdesign(self.config, self.mixed_precision, Tensor(seq_vector, ms.float16), 100,
+ self.network = Colabdesign(self.config, self.mixed_precision, Tensor(seq_vector, ms.float32), 100,
protocol=self.config.protocol)
- load_checkpoint(self.checkpoint_path, self.network)
+ super().__init__(self.checkpoint_url, self.checkpoint_path, self.network, self.name)
net_with_criterion = WithLossCell(self.network)
- soft_weights, temp_weights = get_weights(self.config, self.config.soft_iters, self.config.temp_iters,
- self.config.hard_iters)
+ soft_weights, _, temp_weights = get_weights(self.config, self.config.soft_iters, self.config.temp_iters,
+ self.config.hard_iters)
epoch = self.config.soft_iters + self.config.temp_iters + self.config.hard_iters
lr = get_lr(temp_weights, soft_weights, epoch)
- model_params = [Parameter(Tensor(seq_vector, ms.float16))]
+ model_params = [Parameter(Tensor(seq_vector, ms.float32), name="seq_vector", requires_grad=True)]
opt = get_opt(model_params, lr, 0.0, self.config.opt_choice)
self.train_net = TrainOneStepCell(net_with_criterion, opt, sens=8192)
- super().__init__(self.checkpoint_url, self.checkpoint_path, self.network, self.name)
# pylint: disable=arguments-differ
def predict(self, data):
- pass
+ temp, soft, hard = get_weights(self.config, self.config.soft_iters, self.config.temp_iters,
+ self.config.hard_iters)
+ best = 999
+ for epoch in range(30):
+ temp_step = temp[epoch]
+ soft_step = soft[epoch]
+ hard_step = hard[epoch]
+ data[-6] = temp_step
+ data[-5] = soft_step
+ data[-4] = hard_step
+ inputs_feats = [Tensor(feat) for feat in data]
+ loss = self._jit_forward(inputs_feats)
+ if loss < best:
+ best = loss
+ return best
def forward(self, data):
pass
@@ -84,22 +89,16 @@ class COLABDESIGN(Model):
# pylint: disable=arguments-differ
@jit
def backward(self, feat):
- loss = self.train_net(*feat)
- return loss
+ pass
# pylint: disable=arguments-differ
def train_step(self, data):
- features = []
- for feature in data:
- features.append(Tensor(data[feature]))
-
- loss = self.backward(features)
-
- return loss
+ pass
def _pynative_forward(self, data):
pass
@jit
def _jit_forward(self, data):
- pass
+ loss = self.train_net(*data)
+ return loss
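
Note: `predict` sweeps the per-epoch temperature/soft/hard schedules produced by `get_weights`; the quadratic temperature decay used in the hard phase has the shape below (the config values here are stand-ins, not project defaults):

    # hard_temp -> hard_etemp over hard_iters steps, as in get_weights
    hard_temp, hard_etemp, hard_iters = 1.0, 0.01, 5
    schedule = [hard_etemp + (hard_temp - hard_etemp) * (1 - (i + 1) / hard_iters) ** 2
                for i in range(hard_iters)]
    print([round(t, 3) for t in schedule])  # [0.644, 0.366, 0.168, 0.05, 0.01]
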
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign_configuratuin.py b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign_configuratuin.py
index db679a98e7f8ad8bf937746a2a6f78e484745bab..1339830d5476afd323f243b2810a09693cb3735a 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign_configuratuin.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign_configuratuin.py
@@ -1,12 +1,4 @@
-# Copyright 2023 @ Shenzhen Bay Laboratory &
-# Peking University &
-# Huawei Technologies Co., Ltd
-#
-# This code is a part of MindSPONGE:
-# MindSpore Simulation Package tOwards Next Generation molecular modelling.
-#
-# MindSPONGE is open-source software based on the AI-framework:
-# MindSpore (https://www.mindspore.cn/)
+# Copyright 2022-2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,5 +14,5 @@
# ============================================================================
"""colabdesign_configuration"""
colabdesign_configuration = {
- "fold_design": "https://download.mindspore.cn/mindscience/mindsponge/Multimer/config/"
+ "fold_design": "https://download.mindspore.cn/mindscience/mindsponge/ColabDesign/config/fold_design.yaml"
}
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign_data.py b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign_data.py
index 57c0c41d501305f60d262ad5a895eea6ba9cbb42..5742c7ad526f29b877d28b1931a7b4d8db787aa4 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign_data.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign_data.py
@@ -1,12 +1,4 @@
-# Copyright 2023 @ Shenzhen Bay Laboratory &
-# Peking University &
-# Huawei Technologies Co., Ltd
-#
-# This code is a part of MindSPONGE:
-# MindSpore Simulation Package tOwards Next Generation molecular modelling.
-#
-# MindSPONGE is open-source software based on the AI-framework:
-# MindSpore (https://www.mindspore.cn/)
+# Copyright 2022-2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,7 +15,9 @@
"""colabdesign data"""
import numpy as np
import mindsponge.common.residue_constants as residue_constants
+import mindsponge.common.protein as protein
+from .module.utils import pdb_to_string, _np_get_cb
from ...dataset import curry1
from ....common import residue_constants
@@ -96,6 +90,359 @@ def correct_restypes(feature, key):
return feature
+NUM_RES = 'num residues placeholder'
+NUM_MSA_SEQ = 'msa placeholder'
+NUM_EXTRA_SEQ = 'extra msa placeholder'
+NUM_TEMPLATES = 'num templates placeholder'
+
+
+def prep_pos(pos, residue, chain):
+ '''
+ given input positions (a string of segment ranges separated by comma,
+ for example: "1,3-4,10-15"), return list of indices to constrain.
+ '''
+ residue_set = []
+ chain_set = []
+ len_set = []
+ for idx in pos.split(","):
+ i, j = idx.split("-") if "-" in idx else (idx, None)
+
+ # if chain defined
+ if i[0].isalpha():
+ c, i = i[0], int(i[1:])
+ else:
+ c, i = chain[0], int(i)
+ if j is None:
+ j = i
+ else:
+ j = int(j[1:] if j[0].isalpha() else j)
+ residue_set += list(range(i, j + 1))
+ chain_set += [c] * (j - i + 1)
+ len_set += [j - i + 1]
+
+ residue = np.asarray(residue)
+ chain = np.asarray(chain)
+ pos_set = []
+ for i, c in zip(residue_set, chain_set):
+ idx = np.where((residue == i) & (chain == c))[0]
+ assert len(idx) == 1, f'ERROR: positions {i} and chain {c} not found'
+ pos_set.append(idx[0])
+
+ return {"residue": np.array(residue_set),
+ "chain": np.array(chain_set),
+ "length": np.array(len_set),
+ "pos": np.asarray(pos_set)}
+
+
+class DesignPrep:
+    """DesignPrep"""
+
+ def __init__(self, cfg, num_seq=1):
+ self.cfg_path = cfg
+ self.seq_length = self.cfg_path.seq_length
+ self.pad_length = self.cfg_path.seq_length
+ self.ori_seq_len = self.cfg_path.seq_length
+ self.msa_channel = self.cfg_path.model.msa_channel
+ self.pair_channel = self.cfg_path.model.pair_channel
+ self.extra_msa_num = self.cfg_path.data.max_extra_msa
+ self.template_num = self.cfg_path.eval.max_templates
+ self.msa_num = self.cfg_path.eval.max_msa_clusters - self.template_num
+
+ self._num = num_seq
+
+ def prep_feature(self, pdb_filename, chain, protocol, lengths=100, nums=1):
+        """prep_feature"""
+ if protocol == 'fixbb':
+ self._prep_fixbb(pdb_filename=pdb_filename, chain=chain)
+ x = 0.01 * np.random.normal(0, 1, size=(self._num, self.ori_seq_len, 20))
+ arrays, new_feature, ori_seq_len = self.transfer_input(x)
+ elif protocol == 'hallucination':
+ self._prep_haillucination(lengths, nums)
+ x = 0.01 * np.random.normal(0, 1, size=(nums, lengths, 20))
+ arrays, new_feature, ori_seq_len = self.transfer_input(x)
+ return arrays, new_feature, ori_seq_len
+
+ def prep_input_features(self, length, num=1, templates=1, enums=1):
+ '''
+ given [L]ength, [N]umber of sequences and number of Templates
+ return dictionary of blank features
+ '''
+ aatype = np.zeros(length, int)
+ setattr(self, "aatype", aatype)
+ msa_feat = np.zeros((num, length, 49))
+ setattr(self, "msa_feat", msa_feat)
+ msa_mask = np.ones((num, length))
+ setattr(self, "msa_mask", msa_mask)
+ atom37_atom_exists = np.ones((length, 37))
+ setattr(self, "atom37_atom_exists", atom37_atom_exists)
+ residx_atom37_to_atom14 = np.zeros((length, 37), int)
+ setattr(self, "residx_atom37_to_atom14", residx_atom37_to_atom14)
+ residue_index = np.arange(length)
+ setattr(self, "residue_index", residue_index)
+ extra_deletion_value = np.zeros((enums, length))
+ setattr(self, "extra_deletion_value", extra_deletion_value)
+ extra_has_deletion = np.zeros((enums, length))
+ setattr(self, "extra_has_deletion", extra_has_deletion)
+ extra_msa = np.zeros((enums, length), int)
+ setattr(self, "extra_msa", extra_msa)
+ extra_msa_mask = np.zeros((enums, length))
+ setattr(self, "extra_msa_mask", extra_msa_mask)
+
+ # for template inputs
+ template_aatype = np.zeros((templates, length), int)
+ setattr(self, "template_aatype", template_aatype)
+ template_all_atom_mask = np.zeros((templates, length, 37))
+ setattr(self, "template_all_atom_masks", template_all_atom_mask)
+ template_all_atom_positions = np.zeros((templates, length, 37, 3))
+ setattr(self, "template_all_atom_positions", template_all_atom_positions)
+ template_mask = np.zeros(templates)
+ setattr(self, "template_mask", template_mask)
+ template_pseudo_beta = np.zeros((templates, length, 3))
+ setattr(self, "template_pseudo_beta", template_pseudo_beta)
+ template_pseudo_beta_mask = np.zeros((templates, length))
+ setattr(self, "template_pseudo_beta_mask", template_pseudo_beta_mask)
+
+ def transfer_input(self, d_params=None):
+ """transfer_input"""
+ seq_length = self.seq_length
+ msa_channel = self.msa_channel
+ pair_channel = self.pair_channel
+
+ extra_msa_num = self.extra_msa_num
+ template_num = self.template_num
+ msa_num = self.msa_num
+ ori_seq_len = self.aatype.shape[0]
+ pad_length = seq_length - ori_seq_len
+ new_feature = {}
+
+ if d_params is not None:
+ new_feature['params_seq'] = np.array(d_params).astype(np.float32)
+ new_feature['params_seq'] = np.pad(new_feature.get('params_seq'), ((0, 0), (0, pad_length), (0, 0)),
+ constant_values=(0, 0))
+ new_feature['target_feat'] = self.msa_feat[0, :, :21]
+ new_feature['target_feat'] = np.pad(new_feature.get('target_feat'), [[0, 0], [1, 0]])
+ new_feature['target_feat'] = np.pad(new_feature.get('target_feat'), [[0, pad_length], [0, 0]])
+ new_feature['prev_pos'] = np.zeros((seq_length, 37, 3)).astype(np.float32)
+ new_feature['prev_msa_first_row'] = np.zeros((seq_length, msa_channel)).astype(np.float32)
+ new_feature['prev_pair'] = np.zeros((seq_length, seq_length, pair_channel)).astype(np.float32)
+ ori_msa_feat = self.msa_feat.shape[0]
+ new_feature['msa_feat'] = np.pad(self.msa_feat, ((0, msa_num - ori_msa_feat), (0, pad_length), (0, 0)),
+ constant_values=(0, 0))
+
+ new_feature['msa_mask'] = np.pad(self.msa_mask, ((0, msa_num - ori_msa_feat), (0, pad_length)),
+ constant_values=(0, 0)).astype(np.float32)
+ new_feature['seq_mask_batch'] = np.ones((ori_seq_len)).astype(np.float32)
+ new_feature['seq_mask_batch'] = np.pad(new_feature.get('seq_mask_batch'), ((0, pad_length)),
+ constant_values=(0, 0))
+ new_feature['aatype_batch'] = np.pad(self.aatype, ((0, pad_length)), constant_values=(0, 0)).astype(np.int32)
+
+ new_feature["template_aatype"] = self.template_aatype
+ new_feature["template_all_atom_masks"] = self.template_all_atom_masks
+ new_feature["template_all_atom_positions"] = self.template_all_atom_positions
+ new_feature["template_mask"] = self.template_mask
+ new_feature["template_pseudo_beta_mask"] = self.template_pseudo_beta_mask
+ new_feature["template_pseudo_beta"] = self.template_pseudo_beta
+ ori_template_num = self.template_aatype.shape[0]
+ new_feature['template_aatype'] = np.pad(new_feature.get('template_aatype'),
+ ((0, template_num - ori_template_num), (0, pad_length)),
+ constant_values=(0, 0)).astype(np.int32)
+
+ new_feature['template_all_atom_masks'] = np.pad(new_feature.get('template_all_atom_masks'),
+ ((0, template_num - ori_template_num), (0, pad_length), (0, 0)),
+ constant_values=(0, 0)).astype(np.float32)
+ new_feature['template_mask'] = np.pad(new_feature.get('template_mask'), ((0, template_num - ori_template_num)),
+ constant_values=(0)).astype(np.float32)
+ new_feature["template_all_atom_positions"] = np.pad(new_feature.get("template_all_atom_positions"),
+ ((0, template_num - ori_template_num), (0, pad_length),
+ (0, 0),
+ (0, 0)),
+ constant_values=(0, 0)).astype(np.float32)
+ new_feature['template_pseudo_beta'] = np.pad(new_feature.get("template_pseudo_beta"),
+ ((0, template_num - ori_template_num), (0, pad_length), (0, 0)),
+ constant_values=(0, 0)).astype(np.float32)
+
+ new_feature['template_pseudo_beta_mask'] = np.pad(new_feature.get("template_pseudo_beta_mask"),
+ ((0, template_num - ori_template_num), (0, pad_length)),
+ constant_values=(0, 0)).astype(np.float32)
+ ori_extra_msa_num = self.extra_msa.shape[0]
+ new_feature['extra_msa'] = np.pad(self.extra_msa,
+ ((0, extra_msa_num - ori_extra_msa_num), (0, pad_length)),
+ constant_values=(0, 0)).astype(np.int32)
+ new_feature['extra_has_deletion'] = np.pad(self.extra_has_deletion,
+ ((0, extra_msa_num - ori_extra_msa_num), (0, pad_length)),
+ constant_values=(0, 0)).astype(np.float32)
+ new_feature['extra_deletion_value'] = np.pad(self.extra_deletion_value,
+ ((0, extra_msa_num - ori_extra_msa_num), (0, pad_length)),
+ constant_values=(0, 0)).astype(np.float32)
+ new_feature['extra_msa_mask'] = np.pad(self.extra_msa_mask,
+ ((0, extra_msa_num - ori_extra_msa_num), (0, pad_length)),
+ constant_values=(0, 0)).astype(np.float32)
+ new_feature['residx_atom37_to_atom14'] = np.pad(self.residx_atom37_to_atom14,
+ ((0, pad_length), (0, 0)),
+ constant_values=(0, 0)).astype(np.int32)
+ new_feature['atom37_atom_exists_batch'] = np.pad(self.atom37_atom_exists, ((0, pad_length), (0, 0)),
+ constant_values=(0, 0)).astype(np.float32)
+ new_feature['residue_index_batch'] = np.pad(self.residue_index, ((0, pad_length)),
+ constant_values=(0, 0)).astype(np.int32)
+
+ if hasattr(self, 'batch_aatype'):
+ new_feature["batch_aatype"] = np.pad(self.batch_aatype, ((0, pad_length)),
+ constant_values=(0, 0)).astype(np.float32)
+ new_feature["batch_all_atom_mask"] = np.pad(self.batch_all_atom_mask, ((0, pad_length), (0, 0)),
+ constant_values=(0, 0)).astype(np.float32)
+ new_feature["batch_all_atom_positions"] = np.pad(self.batch_all_atom_positions,
+ ((0, pad_length), (0, 0), (0, 0)),
+ constant_values=(0, 0)).astype(np.float32)
+ else:
+ new_feature["batch_aatype"] = np.ones(shape=(seq_length,)).astype(np.float32)
+ new_feature["batch_all_atom_mask"] = np.ones(shape=(seq_length, 37)).astype(np.float32)
+ new_feature["batch_all_atom_positions"] = np.ones(shape=(seq_length, 37, 3)).astype(np.float32)
+
+ input_keys = ["msa_feat", "msa_mask", "seq_mask_batch", \
+ "template_aatype", "template_all_atom_masks", "template_all_atom_positions", "template_mask", \
+ "template_pseudo_beta_mask", "template_pseudo_beta", \
+ "extra_msa", "extra_has_deletion", "extra_deletion_value", "extra_msa_mask", \
+ "residx_atom37_to_atom14", "atom37_atom_exists_batch", \
+ "residue_index_batch", "batch_aatype", "batch_all_atom_positions", "batch_all_atom_mask"]
+ arrays = [new_feature.get(key) for key in input_keys]
+ return arrays, new_feature, ori_seq_len
+
+ def _prep_features(self, num_res, num_seq=None, num_templates=1):
+ '''process features'''
+ if num_seq is None:
+ num_seq = self._num
+ self.prep_input_features(length=num_res, num=num_seq, templates=num_templates)
+
+ def _prep_fixbb(self, pdb_filename, chain="A",
+ rm_template_seq=True, rm_template_sc=True, ignore_missing=True):
+ """_prep_fixbb"""
+ o = extract_pdb(pdb_filename, chain=chain, ignore_missing=ignore_missing)
+ pdb_residue_index, pdb_idx, pdb_lengths, pdb_batch = \
+ o.get("residue_index"), o.get("idx"), o.get("lengths"), o.get("batch")
+ self.ori_seq_len = pdb_residue_index.shape[0]
+ self.pad_length = self.seq_length - self.ori_seq_len
+ # feat dims
+ num_seq = self._num
+ res_idx = pdb_residue_index
+
+ # configure input features
+ self._prep_features(num_res=sum(pdb_lengths), num_seq=num_seq)
+ setattr(self, "residue_index", res_idx)
+ batch_aatype, batch_all_atom_mask, batch_all_atom_positions = make_fixed_size(pdb_batch,
+ num_res=sum(pdb_lengths))
+ setattr(self, "batch_aatype", batch_aatype)
+ setattr(self, "batch_all_atom_mask", batch_all_atom_mask)
+ setattr(self, "batch_all_atom_positions", batch_all_atom_positions)
+
+ rm, leng = {}, sum(pdb_lengths)
+ for n, x in [["rm_seq", rm_template_seq], ["rm_sc", rm_template_sc]]:
+ rm[n] = np.full(leng, False)
+ if isinstance(x, str):
+ rm.get(n)[prep_pos(x, **pdb_idx).get("pos")] = True
+ else:
+ rm.get(n)[:] = x
+
+ def _prep_haillucination(self, length=100, num=1):
+ """_prep_haillucination"""
+ self._prep_features(num_res=length, num_seq=num)
+ setattr(self, "residue_index", np.arange(length))
+
+
+def extract_pdb(pdb_filename, chain=None,
+ offsets=None, lengths=None,
+ ignore_missing=False):
+ """extract_pdb"""
+
+ def add_atom(batch):
+ """add missing CB atoms based on N,CA,C"""
+ atom_idx = residue_constants.atom_order
+ p, m = batch.get("all_atom_positions"), batch.get("all_atom_mask")
+
+ atoms = {k: p[..., atom_idx[k], :] for k in ["N", "CA", "C"]}
+ cb = atom_idx["CB"]
+
+ cb_mask = np.prod([m[..., atom_idx[k]] for k in ["N", "CA", "C"]], 0)
+ cb_atoms = _np_get_cb(atoms.get("N"), atoms.get("CA"), atoms.get("C"))
+ batch["all_atom_positions"][..., cb, :] = np.where(m[:, cb, None], p[:, cb, :], cb_atoms)
+ batch["all_atom_mask"][..., cb] = (m[:, cb] + cb_mask) > 0
+ return {"atoms": batch["all_atom_positions"][:, cb], "mask": cb_mask}
+
+ # go through each defined chain
+ chains = [None] if chain is None else chain.split(",")
+ o, last = [], 0
+ residue_idx, chain_idx = [], []
+ full_lengths = []
+
+ for n, simplechain in enumerate(chains):
+ protein_obj = protein.from_pdb_string(pdb_to_string(pdb_filename), chain_id=simplechain)
+ batch = {'aatype': protein_obj.aatype,
+ 'all_atom_positions': protein_obj.atom_positions,
+ 'all_atom_mask': protein_obj.atom_mask,
+ 'residue_index': protein_obj.residue_index}
+
+ cb_feat = add_atom(batch)
+
+ im = ignore_missing[n] if isinstance(ignore_missing, list) else ignore_missing
+ if im:
+ replies = batch.get("all_atom_mask")[:, 0] == 1
+ for key in batch:
+ batch[key] = batch.get(key)[replies]
+ residue_index = batch.get("residue_index") + last
+
+ else:
+ offset = 0 if offsets is None else (offsets[n] if isinstance(offsets, list) else offsets)
+ replies = offset + (protein_obj.residue_index - protein_obj.residue_index.min())
+ lengs = (replies.max() + 1) if lengths is None else (lengths[n] if isinstance(lengths, list) else lengths)
+
+ def scatter(x, value=0, lens=0, re=0):
+ shape = (lens,) + x.shape[1:]
+ y = np.full(shape, value, dtype=x.dtype)
+ y[re] = x
+ return y
+
+ batch = {"aatype": scatter(batch.get("aatype"), -1, lens=lengs, re=replies),
+ "all_atom_positions": scatter(batch.get("all_atom_positions"), lens=lengs, re=replies),
+ "all_atom_mask": scatter(batch.get("all_atom_mask"), lens=lengs, re=replies),
+ "residue_index": scatter(batch.get("residue_index"), -1, lens=lengs, re=replies)}
+
+ residue_index = np.arange(lengs) + last
+
+ last = residue_index[-1] + 50
+ o.append({"batch": batch,
+ "residue_index": residue_index,
+ "cb_feat": cb_feat})
+
+ residue_idx.append(batch.pop("residue_index"))
+        chain_idx.append([simplechain] * len(residue_idx[-1]))
+ full_lengths.append(len(residue_index))
+
+ # concatenate chains
+ o_inter = {}
+ for i, feature in enumerate(o):
+ for key in feature.keys():
+ if i == 0:
+ o_inter[key] = feature.get(key)
+ else:
+ o_inter[key] = np.concatenate((o_inter.get(key), feature.get(key)), 0)
+
+ o = o_inter
+ o["idx"] = {"residue": np.concatenate(residue_idx), "chain": np.concatenate(chain_idx)}
+ o["lengths"] = full_lengths
+ return o
+
+
+def make_fixed_size(feat, num_res):
+    """make_fixed_size"""
+
+ for k, v in feat.items():
+ if k == "batch":
+ feat[k] = make_fixed_size(v, num_res)
+ else:
+ continue
+
+ return feat.get("aatype"), feat.get("all_atom_mask"), feat.get("all_atom_positions")
+
+
@curry1
def prep(feature=None, cfg=None):
prev_pos = np.zeros((cfg.seq_length, 37, 3)).astype(np.float32)
@@ -108,7 +455,7 @@ def prep(feature=None, cfg=None):
@curry1
-def get_weights(feature=None, index=None, cfg=None):
+def get_weights(feature=None, cfg=None):
"""get weights"""
opt_temp = []
opt_soft = []
@@ -129,7 +476,7 @@ def get_weights(feature=None, index=None, cfg=None):
cfg.hard_etemp + (cfg.hard_temp - cfg.hard_etemp) * (1 - (i + 1) / cfg.hard_iters) ** 2)
opt_soft.append(cfg.hard_esoft + (cfg.hard_soft - cfg.hard_esoft) * ((i + 1) / cfg.hard_iters))
opt_hard.append(cfg.hard_decay + (cfg.hard_value - cfg.hard_decay) * ((i + 1) / cfg.hard_iters))
- feature.append(opt_temp[index])
- feature.append(opt_soft[index])
- feature.append(opt_hard[index])
+ feature.append(opt_temp)
+ feature.append(opt_soft)
+ feature.append(opt_hard)
return feature
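
Note: a quick illustration of what `prep_pos` returns for a segment string; the import path is assumed from the package layout above:

    import numpy as np
    from mindsponge.pipeline.models.colabdesign.colabdesign_data import prep_pos

    out = prep_pos("1,3-4",
                   residue=np.array([1, 2, 3, 4]),
                   chain=np.array(["A", "A", "A", "A"]))
    print(out["residue"])  # [1 3 4]
    print(out["pos"])      # [0 2 3]
    print(out["length"])   # [1 2]
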
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign_dataset.py b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign_dataset.py
index 2467b682138f79d1c1919b99cb846707f12665d5..439bfa7204ceb66d31d3d3af054118566cd3e9b8 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign_dataset.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/colabdesign_dataset.py
@@ -1,12 +1,4 @@
-# Copyright 2023 @ Shenzhen Bay Laboratory &
-# Peking University &
-# Huawei Technologies Co., Ltd
-#
-# This code is a part of MindSPONGE:
-# MindSpore Simulation Package tOwards Next Generation molecular modelling.
-#
-# MindSPONGE is open-source software based on the AI-framework:
-# MindSpore (https://www.mindspore.cn/)
+# Copyright 2022-2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,12 +14,10 @@
# ============================================================================
"""colabdesign dataset"""
import os
-import pickle
-
from mindspore.dataset import GeneratorDataset
+from .colabdesign_data import prep, get_weights, DesignPrep
from ...dataset import PSP, data_process_run
-from .colabdesign_data import prep, get_weights
class ColabDesignDataSet(PSP):
@@ -38,24 +28,22 @@ class ColabDesignDataSet(PSP):
self.supported_models = ['ColabDesign']
self.in_memory = False
self.colabdesign_inputs()
- self.indx = 0
self.training_data_src = ""
self.training_pkl_path = ""
self.training_pdb_path = ""
self.training_pdb_items = ""
self.training_pkl_items = ""
- self.data_process = [get_weights(self.indx, cfg=config), prep(cfg=config)]
+ self.data_process = [get_weights(cfg=self.config), prep(cfg=self.config)]
self._num = num_seq
super().__init__()
+ # pylint: disable=arguments-differ
def __getitem__(self, idx):
if self.in_memory:
data = self.inputs[idx]
else:
data = self.data_parse(idx)
-
- self.indx += 1
features = self.process(data)
return tuple(features)
@@ -76,14 +64,22 @@ class ColabDesignDataSet(PSP):
# pylint: disable=arguments-differ
def data_parse(self, idx):
- pkl_path = self.training_pkl_items[idx]
- f = open(pkl_path, "rb")
- data = pickle.load(f)
- return data
+ pdb_path = self.training_pdb_items[idx]
+ data_prep = DesignPrep(self.config)
+ inputs_feats, _, _ = data_prep.prep_feature(pdb_filename=pdb_path, chain="A", \
+ protocol=self.config.protocol)
+ return inputs_feats
# pylint: disable=arguments-differ
def process(self, data):
- features = data_process_run(data.copy(), self.data_process)
+ data_prep = DesignPrep(self.config)
+ pdb_path = data
+ inputs_feats, new_feature, _ = data_prep.prep_feature(pdb_filename=pdb_path, chain="A",
+ protocol=self.config.protocol)
+ features = data_process_run(inputs_feats, self.data_process)
+ features[-3] = new_feature.get('prev_pos')
+ features[-2] = new_feature.get('prev_msa_first_row')
+ features[-1] = new_feature.get('prev_pair')
return features
def set_training_data_src(self, data_src):
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/module/__init__.py b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/module/__init__.py
index 9d27dd78d05d135f5c629cc6a40a9e8c96ae6cae..8301edc54cb5a00925d4aac3f1b3220b271307b1 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/module/__init__.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/module/__init__.py
@@ -1,12 +1,4 @@
-# Copyright 2023 @ Shenzhen Bay Laboratory &
-# Peking University &
-# Huawei Technologies Co., Ltd
-#
-# This code is a part of MindSPONGE:
-# MindSpore Simulation Package tOwards Next Generation molecular modelling.
-#
-# MindSPONGE is open-source software based on the AI-framework:
-# MindSpore (https://www.mindspore.cn/)
+# Copyright 2022-2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/module/utils.py b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/module/utils.py
index c418ab46d262d5da9af7505e7789531cb4d0eadf..0e806f623d6559c79d7b7b189dfc8e05c5f20fa3 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/module/utils.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/module/utils.py
@@ -1,4 +1,4 @@
-# Copyright 2022 Huawei Technologies Co., Ltd
+# Copyright 2022-2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,7 +13,10 @@
# limitations under the License.
# ============================================================================
"""learning rate"""
+from string import ascii_uppercase, ascii_lowercase
import numpy as np
+
+
import mindspore.nn as nn
import mindsponge.common.residue_constants as residue_constants
@@ -39,7 +42,7 @@ def get_weights(config, soft_iters, temp_iters, hard_iters):
config.hard_etemp + (config.hard_temp - config.hard_etemp) * (1 - (i + 1) / hard_iters) ** 2)
opt_soft.append(config.hard_esoft + (config.hard_soft - config.hard_esoft) * ((i + 1) / hard_iters))
opt_hard.append(config.hard_decay + (config.hard_value - config.hard_decay) * ((i + 1) / hard_iters))
- return opt_temp, opt_hard
+ return opt_temp, opt_soft, opt_hard
def get_lr(opt_temps, opt_softs, epoch, lr=0.1):
@@ -65,3 +68,66 @@ def get_seqs(seq_hard):
order_aa = {b: a for a, b in aa_order.items()}
x = seq_hard.argmax(-1)
return ["".join(order_aa[a] for a in s) for s in x]
+
+
+# in shared
+alphabet_list = list(ascii_uppercase + ascii_lowercase)
+
+MODRES = {'TPO': 'THR', 'CSO': 'CYS', 'SEP': 'SER', 'M3L': 'LYS',
+ 'MSE': 'MET', 'MLY': 'LYS', 'FME': 'MET', 'HYP': 'PRO',
+ '4BF': 'TYR', 'KCX': 'LYS', 'B3E': 'GLU', 'B3D': 'ASP',
+ 'HSK': 'HIS', 'SAC': 'SER', 'PCA': 'GLU', 'DAL': 'ALA',
+ 'CME': 'CYS', 'CSD': 'CYS', 'OCS': 'CYS', 'DPR': 'PRO',
+ 'B3K': 'LYS', 'ALY': 'LYS', 'YCM': 'CYS', 'MLZ': 'LYS',
+ 'HY3': 'PRO', 'LLP': 'LYS', 'MGN': 'GLN', 'MHS': 'HIS',
+ 'DBZ': 'ALA', 'DCY': 'CYS', 'DVA': 'VAL', 'NLE': 'LEU',
+ 'SMC': 'CYS', 'AGM': 'ARG', 'B3A': 'ALA', 'DAS': 'ASP',
+ 'DLY': 'LYS', 'DSN': 'SER', 'DTH': 'THR', 'GL3': 'GLY',
+ 'HZP': 'PRO', 'CSX': 'CYS', 'BAL': 'ALA', 'HIC': 'HIS',
+ 'TRQ': 'TRP', 'B3Y': 'TYR', 'PHI': 'PHE', 'PTR': 'TYR',
+ 'TYS': 'TYR', 'IAS': 'ASP', 'GPL': 'LYS', 'KYN': 'TRP',
+ 'SEC': 'CYS'}
+
+
+def _np_get_cb(n, ca, c):
+ '''compute CB placement from N, CA, C'''
+ return _np_extend(c, n, ca, 1.522, 1.927, -2.143)
+
+
+def _np_norm(x, axis=-1, keepdims=True, eps=1e-8):
+ '''compute norm of vector'''
+ return np.sqrt(np.square(x).sum(axis, keepdims=keepdims) + eps)
+
+
+def _np_extend(a, b, c, lengs, atom, d):
+ '''
+ given coordinates a-b-c,
+ c-d (L)ength, b-c-d (A)ngle, and a-b-c-d (D)ihedral
+ return 4th coordinate d
+ '''
+ normalize = lambda x: x / _np_norm(x)
+ bc = normalize(b - c)
+ n = normalize(np.cross(b - a, bc))
+ return c + sum([lengs * np.cos(atom) * bc,
+ lengs * np.sin(atom) * np.cos(d) * np.cross(n, bc),
+ lengs * np.sin(atom) * np.sin(d) * -n])
+
+
+def pdb_to_string(pdb_file):
+    """Read a PDB file and return its ATOM records as a string."""
+ modres = {**MODRES}
+ lines = []
+ for line in open(pdb_file, "rb"):
+ line = line.decode("utf-8", "ignore").rstrip()
+ if line[:6] == "MODRES":
+ x = line[12:15]
+ y = line[24:27]
+ if x not in modres and y in residue_constants.restype_3to1:
+ modres[x] = y
+ if line[:6] == "HETATM":
+ x = line[17:20]
+ if x in modres:
+ line = "ATOM " + line[6:17] + modres.get(x) + line[20:]
+ if line[:4] == "ATOM":
+ lines.append(line)
+ return "\n".join(lines)
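
Note: a self-contained sketch of `pdb_to_string` on a tiny synthetic file: HETATM records whose residue is in the built-in MODRES table (here MSE) are rewritten as ATOM records with the standard residue name, and anything that is not an ATOM line is dropped. The import path is assumed from the package layout; the coordinates are made up.

    import tempfile
    from mindsponge.pipeline.models.colabdesign.module.utils import pdb_to_string

    pdb_text = (
        "HETATM    1  CA  MSE A   1      11.104  13.207   2.100  1.00  0.00           C\n"
        "ATOM      2  CA  ALA A   2      12.560  14.334   2.200  1.00  0.00           C\n"
        "TER       3      ALA A   2\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".pdb", delete=False) as handle:
        handle.write(pdb_text)
    print(pdb_to_string(handle.name))
    # first line is rewritten as an ATOM record with residue MET,
    # the plain ATOM line is kept as-is and the TER line is dropped
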
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/nn_arch.py b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/nn_arch.py
index dcc986cb2242c37ee4a3cfd8bf04d2865c5725e9..3b233f809df8166d16d4b6cb718b7e9d838ba24c 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/nn_arch.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/colabdesign/nn_arch.py
@@ -1,4 +1,4 @@
-# Copyright 2022 Huawei Technologies Co., Ltd & CPL YiQin GAO Research Group
+# Copyright 2022-2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,6 +23,7 @@ from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore import ops
+from ...models.megafold.nn_arch import Megafold
from .module.loss_design import LossNet
@@ -60,7 +61,7 @@ class Colabdesign(nn.Cell):
def __init__(self, config, mixed_precision, seq_vector, ori_seq_len, protocol):
super(Colabdesign, self).__init__()
- self.megafold = MegaFold(config, mixed_precision)
+ self.megafold = Megafold(config, mixed_precision)
self.megafold.add_flags_recursive(train_backward=True)
self.cfg = config
self.seq_vector = seq_vector
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/deepfri/deepfri.py b/MindSPONGE/mindsponge/python/pipeline/models/deepfri/deepfri.py
index 0ea64fcd7050548b32dafcd2ce398fd62eeacd49..6596c2806a25aed20c29be2bdbe36adbce74fbdb 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/deepfri/deepfri.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/deepfri/deepfri.py
@@ -13,8 +13,6 @@
# limitations under the License.
# ============================================================================
"""deepfri"""
-import numpy as np
-
from mindspore import jit, context
import mindspore as ms
from mindspore import Tensor
@@ -35,16 +33,14 @@ class DeepFri(Model):
else:
self.mixed_precision = True
context.set_context(device_target="Ascend")
- self.configs = config
+ self.config = config
self.checkpoint_url = \
f"https://download.mindspore.cn/mindscience/mindsponge/DeepFri/checkpoint/" \
- f"deepfri_{self.configs.prefix}.ckpt"
- self.checkpoint_path = f"./deepfri_{self.configs.prefix}.ckpt"
- self.use_jit = self.configs.use_jit
- param_dict = ms.load_checkpoint(self.checkpoint_path)
- self.network = Predictor(self.configs.prefix, self.configs, gcn=True)
- ms.load_param_into_net(self.network, param_dict)
- super().__init__(self.checkpoint_url, self.network, self.name, self.white_list)
+ f"DeepFRI_{self.config.prefix}.ckpt"
+ self.checkpoint_path = f"./DeepFRI_{self.config.prefix}.ckpt"
+ self.use_jit = self.config.use_jit
+ self.network = Predictor(self.config.prefix, self.config, gcn=True)
+ super().__init__(self.checkpoint_url, self.checkpoint_path, self.network)
def forward(self, data):
pass
@@ -60,7 +56,6 @@ class DeepFri(Model):
def predict(self, inputs):
inputs[0] = Tensor(inputs[0], dtype=ms.float32)
inputs[1] = Tensor(inputs[1], dtype=ms.float32)
- inputs[2] = Tensor(np.array(inputs[2], np.str_))
if self.use_jit:
outputs = self._jit_forward(inputs)
else:
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/deepfri/deepfridata.py b/MindSPONGE/mindsponge/python/pipeline/models/deepfri/deepfridata.py
index dd3d4c142f246930c5be9488e93f4c2f54c3ecca..77fdc2a322a8d826cb6cc5256e05e4adac9e7b06 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/deepfri/deepfridata.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/deepfri/deepfridata.py
@@ -15,6 +15,9 @@
"""deepfridata"""
import numpy as np
+from Bio import SeqIO
+from Bio.PDB.PDBParser import PDBParser
+
from ...dataset import curry1
from ....common import residue_constants
@@ -106,20 +109,38 @@ def seq2onehot(seq):
return seqs_x
+def load_predicted_pdb(pdb_file):
+ """Load predicted pdb"""
+ # Generate (diagonalized) C_alpha distance matrix from a pdb file
+ parser = PDBParser()
+ structure = parser.get_structure(pdb_file.split('/')[-1].split('.')[0], pdb_file)
+ residues = [r for r in structure.get_residues()]
+
+ # sequence from atom lines
+ records = SeqIO.parse(pdb_file, 'pdb-atom')
+ seqs = [str(r.seq) for r in records]
+
+ size = len(residues)
+ distances = np.empty((size, size))
+ for x in range(size):
+ for y in range(size):
+ one = residues[x]['CA'].get_coord()
+ two = residues[y]['CA'].get_coord()
+ distances[x, y] = np.linalg.norm(one - two)
+
+ return distances, seqs[0]
+
+
@curry1
def load_cmap(cmap=None):
"""load cmap"""
- if 'C_alpha' not in cmap:
- raise ValueError("C_alpha not in *.npz dict.")
- dis = cmap['C_alpha']
- cmap_thresh = 10.0
- adj = np.double(dis < cmap_thresh)
- seq = str(cmap['seqres'])
+
+ dis, seq = load_predicted_pdb(cmap)
+ adj = np.double(dis < 10.0)
one_hot = seq2onehot(seq)
one_hot = one_hot.reshape(1, *one_hot.shape)
adj = adj.reshape(1, *adj.shape)
temp = []
temp.append(adj)
temp.append(one_hot)
- temp.append(seq)
return temp
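The new load_predicted_pdb/load_cmap path derives the contact map directly from a PDB file instead of a precomputed *.npz archive. A self-contained sketch of the same idea, using Biopython and a vectorized distance computation with the 10 Å threshold used above (function name is illustrative):

import numpy as np
from Bio import SeqIO
from Bio.PDB.PDBParser import PDBParser

def pdb_to_contact_map(pdb_file, threshold=10.0):
    """Return (binary contact map, sequence) from a predicted PDB file."""
    structure = PDBParser(QUIET=True).get_structure("model", pdb_file)
    # Collect C-alpha coordinates of residues that have a CA atom.
    coords = np.array([res["CA"].get_coord()
                       for res in structure.get_residues() if "CA" in res])
    # Pairwise C-alpha distances, thresholded into an adjacency matrix.
    dists = np.linalg.norm(coords[:, None, :] - coords[None, :, :], axis=-1)
    adj = (dists < threshold).astype(np.float64)
    # Sequence recovered from the ATOM records.
    seq = str(next(SeqIO.parse(pdb_file, "pdb-atom")).seq)
    return adj, seq
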
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/deepfri/deepfridataset.py b/MindSPONGE/mindsponge/python/pipeline/models/deepfri/deepfridataset.py
index 0666c8b5f08f2e17e4087a6077b2c5f7ecbc1eea..3ee7bb20d7a1afe28ab955ce3eb24dc083677948 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/deepfri/deepfridataset.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/deepfri/deepfridataset.py
@@ -50,7 +50,7 @@ class DeepFriDataSet(PSP):
return data_len
def deepfri_inputs(self):
- feature_list = ['adj', 'seq_1hot', 'seq']
+ feature_list = ['adj', 'seq_1hot']
self.feature_list = feature_list
# pylint: disable=arguments-differ
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/deepfri/nn_arch.py b/MindSPONGE/mindsponge/python/pipeline/models/deepfri/nn_arch.py
index 827cd8054d1a32b558e7546eee89bf56c039071f..bc3f0eb36b56869803fdc959dc2ca8869e4fdaf4 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/deepfri/nn_arch.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/deepfri/nn_arch.py
@@ -58,13 +58,12 @@ class Predictor(nn.Cell):
self.sum_pooling = SumPooling(1)
self.en_dense = nn.Dense((self.config.gc_dims[0] + self.config.gc_dims[1] + self.config.gc_dims[2]),
self.config.fc_dims, has_bias=True, activation='relu')
- self.dropout = nn.Dropout(1 - self.config.dropout)
+ self.dropout = nn.Dropout(p=self.config.dropout)
self.func_predictor = FuncPredictor(self.config.fc_dims, self.config.output_dim, train)
self.pad = ops.Pad(((0, 0), (0, 0), (0, 512 - self.config.input_dim)))
- def predict(self, adj, seq_1hot, seq):
+ def predict(self, adj, seq_1hot):
"""predict"""
- print("### Computing predictions on a single protein...", seq)
if self.gcn:
seq_0 = self.pad(seq_1hot)
x_1 = self.lstm(seq_0)
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/esm2/esm2.py b/MindSPONGE/mindsponge/python/pipeline/models/esm2/esm2.py
index 747cbe337c9316d19bd8a598c8f0f221684d919d..a24f490ec807004bf2ad80cca77b7f618fa35138 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/esm2/esm2.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/esm2/esm2.py
@@ -14,6 +14,7 @@
# ============================================================================
"""esm2 model"""
from mindspore import jit, context
+from mindspore.common import mutable
# pylint: disable=relative-beyond-top-level
from .nn_arch import ESM2 as esm2
from ..model import Model
@@ -33,7 +34,8 @@ class ESM2(Model):
embed_dim=self.config.encoder_embed_dim,
attention_heads=self.config.encoder_attention_heads,
alphabet=self.config.alphabet,
- token_dropout=self.config.token_dropout)
+ token_dropout=self.config.token_dropout,
+ return_contacts=self.config.return_contacts)
super().__init__(self.checkpoint_url, self.checkpoint_path, self.network, self.name)
def forward(self, data):
@@ -45,9 +47,8 @@ class ESM2(Model):
return result
def predict(self, data, **kwargs):
- return_contacts = kwargs.get('return_contacts')
- batch_tokens = data
- forward_data = batch_tokens, return_contacts
+ batch_tokens = mutable(data)
+ forward_data = batch_tokens
x, hidden_representations, attentions, contacts = self.forward(forward_data)
result = (x, hidden_representations, attentions, contacts)
return result
@@ -66,13 +67,11 @@ class ESM2(Model):
@jit
def _jit_forward(self, data):
- batch_tokens, return_contacts = data
- x, hidden_representations, attentions, contacts = self.network(batch_tokens, return_contacts=return_contacts)
+ x, hidden_representations, attentions, contacts = self.network(data)
result = (x, hidden_representations, attentions, contacts)
return result
def _pynative_forward(self, data):
- batch_tokens, return_contacts = data
- x, hidden_representations, attentions, contacts = self.network(batch_tokens, return_contacts=return_contacts)
+ x, hidden_representations, attentions, contacts = self.network(data)
result = (x, hidden_representations, attentions, contacts)
return result
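return_contacts is now fixed at construction time and the token batch is wrapped with mindspore.common.mutable before reaching the jit-compiled forward. A minimal sketch of what mutable buys here, on a toy function (names are illustrative):

import mindspore as ms
from mindspore import Tensor, jit
from mindspore.common import mutable

@jit
def token_sum(batch):
    tokens_a, tokens_b = batch
    return tokens_a + tokens_b

# Marking the tuple as mutable tells graph compilation to treat its contents
# as variable inputs rather than baked-in constants, so feeding a new batch
# of tokens does not force a recompile of the jit graph.
batch = mutable((Tensor([1, 2], ms.int32), Tensor([3, 4], ms.int32)))
print(token_sum(batch))
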
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/esm2/nn_arch.py b/MindSPONGE/mindsponge/python/pipeline/models/esm2/nn_arch.py
index d86f08e25d37e38b0b3e2cf173d10ae8bb6ab630..ab28654cdf240376d339fe70bb51f832151ba8f5 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/esm2/nn_arch.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/esm2/nn_arch.py
@@ -33,6 +33,8 @@ class ESM2(nn.Cell):
attention_heads: int = 20,
alphabet: Union[Alphabet, str] = "ESM-1b",
token_dropout: bool = True,
+ return_contacts=False,
+ need_head_weights=False
):
super().__init__()
self.num_layers = num_layers
@@ -49,12 +51,14 @@ class ESM2(nn.Cell):
self.prepend_bos = alphabet.prepend_bos
self.append_eos = alphabet.append_eos
self.token_dropout = token_dropout
-
+ self.return_contacts = return_contacts
+ self.need_head_weights = need_head_weights
self._init_submodules()
- def construct(self, tokens, need_head_weights=False, return_contacts=False):
+ def construct(self, tokens):
"""ESM2 Model structure"""
- if return_contacts:
+ need_head_weights = self.need_head_weights
+ if self.return_contacts:
need_head_weights = True
padding_mask = ops.equal(tokens, self.padding_idx)
x = self.embed_scale * self.embed_tokens(tokens)
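Moving need_head_weights and return_contacts into __init__ leaves construct() with a Tensor-only signature, which is friendlier to graph compilation. A toy cell illustrating the same pattern (the cell and option names are illustrative):

import mindspore as ms
from mindspore import Tensor, nn

class ToyEncoder(nn.Cell):
    """Boolean options are fixed when the cell is built, not passed per call."""
    def __init__(self, return_square=False):
        super().__init__()
        self.return_square = return_square
        self.relu = nn.ReLU()

    def construct(self, x):
        y = self.relu(x)
        if self.return_square:
            y = y * y
        return y

net = ToyEncoder(return_square=True)
print(net(Tensor([[-1.0, 2.0]], ms.float32)))
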
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/graphdta/__init__.py b/MindSPONGE/mindsponge/python/pipeline/models/graphdta/__init__.py
index ad095104346dc1af4bc247eee2c54f91eba96bbf..e26d32eadbd96ed4fbaed6afd725a0d25da599b0 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/graphdta/__init__.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/graphdta/__init__.py
@@ -1,12 +1,4 @@
-# Copyright 2023 @ Shenzhen Bay Laboratory &
-# Peking University &
-# Huawei Technologies Co., Ltd
-#
-# This code is a part of MindSPONGE:
-# MindSpore Simulation Package tOwards Next Generation molecular modelling.
-#
-# MindSPONGE is open-source software based on the AI-framework:
-# MindSpore (https://www.mindspore.cn/)
+# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/graphdta/data_process.py b/MindSPONGE/mindsponge/python/pipeline/models/graphdta/data_process.py
index 4c35b96635c2971993784a1b8f6bdf655c0b95f1..fb470e8c702f4fc6a47eeee796fe36d0ff9eae6e 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/graphdta/data_process.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/graphdta/data_process.py
@@ -21,6 +21,7 @@ import json
import pickle
import numpy as np
import pandas as pd
+from tqdm import tqdm
from rdkit import Chem
import networkx as nx
@@ -73,7 +74,7 @@ def smile_to_graph(input_smile):
return c_size, features, edge_index
-def seq_cat(prot, max_seq_len):
+def seq_cat(prot, max_seq_len, seq_dict):
x = np.zeros(max_seq_len)
for i, ch in enumerate(prot[:max_seq_len]):
x[i] = seq_dict[ch]
@@ -124,13 +125,12 @@ def create_prot_csv(input_datasets):
print('len(set(drugs)),len(set(prots)):', len(set(drug_t)), len(set(prot_t)))
-def process_data(xd, xt, y, smile_graph_info, pkl_path):
+def process_data(xd, xt, y, smile_graph_info):
"""save data into pickle file"""
assert (len(xd) == len(xt) == len(y)), "The three lists must be the same length!"
data_len = len(xd)
res = []
- for i in range(data_len):
- print('Converting SMILES to graph: {}/{}'.format(i+1, data_len))
+ for i in tqdm(range(data_len), "generating features"):
smiles = xd[i]
target = xt[i]
labels = y[i]
@@ -144,43 +144,32 @@ def process_data(xd, xt, y, smile_graph_info, pkl_path):
"target": np.array([target]),
"num_nodes": np.array([c_size])}
res.append(res_t)
+ return res
- # save features to pickle file
- with os.fdopen(os.open(pkl_path, os.O_CREAT, stat.S_IWUSR), 'wb') as f:
- pickle.dump(res, f)
+def generate_feature(data_path):
+ """generate feature"""
+ print(f"start preprocessing {data_path}:")
-if __name__ == '__main__':
- # choose the data to generate dataset for training or inference, support kiba or davis
- datasets = ['kiba']
+ seq_voc = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
+ max_seq_len = 1000
+ seq_dict = {v: (i + 1) for i, v in enumerate(seq_voc)}
- create_prot_csv(datasets)
-
- SEQ_VOC = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
- MAX_SEQ_LEN = 1000
- seq_dict = {v: (i + 1) for i, v in enumerate(SEQ_VOC)}
- seq_dict_len = len(seq_dict)
-
- compound_iso_smiles = []
- for dt_name in datasets:
- opts = ['train', 'test']
- for opt in opts:
- df = pd.read_csv(f'data/{dt_name}/{dt_name}_{opt}.csv')
- compound_iso_smiles += list(df['compound_iso_smiles'])
- compound_iso_smiles_set = set(compound_iso_smiles)
+ df_data = pd.read_csv(data_path)
+ drugs = list(df_data['compound_iso_smiles'])
+ compound_iso_smiles_set = set(drugs)
smile_graph = {}
- for smile in compound_iso_smiles_set:
+    for smile in tqdm(compound_iso_smiles_set, "converting SMILES to graphs"):
g = smile_to_graph(smile)
smile_graph[smile] = g
- # convert data and save to pickle
- for dataset in datasets:
- for opt in ['train', 'test']:
- df_data = pd.read_csv(f'data/{dataset}/{dataset}_{opt}.csv')
- drugs = list(df_data['compound_iso_smiles'])
- prots = list(df_data['target_sequence'])
- Y = list(df_data['affinity'])
- XT = [seq_cat(t, MAX_SEQ_LEN) for t in prots]
- drugs, prots, Y = np.asarray(drugs), np.asarray(XT), np.asarray(Y)
-
- process_data(drugs, prots, Y, smile_graph, f"data/{dataset}/{dataset}_{opt}.pkl")
+ prots = list(df_data['target_sequence'])
+ if "affinity" not in df_data:
+ y = np.zeros(len(drugs))
+ else:
+ y = list(df_data['affinity'])
+ xt = [seq_cat(t, max_seq_len, seq_dict) for t in prots]
+ drugs, prots, y = np.asarray(drugs), np.asarray(xt), np.asarray(y)
+
+ feature = process_data(drugs, prots, y, smile_graph)
+ return feature
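seq_cat now receives seq_dict explicitly instead of relying on the removed module-level globals. A standalone sketch of the same integer encoding (in this hypothetical variant, index 0 doubles as padding and unknown letters):

import numpy as np

SEQ_VOC = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
SEQ_DICT = {ch: i + 1 for i, ch in enumerate(SEQ_VOC)}

def seq_cat(prot, max_seq_len=1000, seq_dict=None):
    """Encode a protein string as a fixed-length integer vector."""
    seq_dict = seq_dict or SEQ_DICT
    x = np.zeros(max_seq_len)
    for i, ch in enumerate(prot[:max_seq_len]):
        x[i] = seq_dict.get(ch, 0)  # truncate to max_seq_len, pad with 0
    return x

print(seq_cat("MKTAYIAKQR", max_seq_len=12))
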
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/graphdta/graphdta.py b/MindSPONGE/mindsponge/python/pipeline/models/graphdta/graphdta.py
index 16d4936c22165bcd407e2f53d9cf80c6104c6603..b5ccdae171d7dfc608a80f7d5fca2dd2e669fd38 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/graphdta/graphdta.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/graphdta/graphdta.py
@@ -1,12 +1,4 @@
-# Copyright 2023 @ Shenzhen Bay Laboratory &
-# Peking University &
-# Huawei Technologies Co., Ltd
-#
-# This code is a part of MindSPONGE:
-# MindSpore Simulation Package tOwards Next Generation molecular modelling.
-#
-# MindSPONGE is open-source software based on the AI-framework:
-# MindSpore (https://www.mindspore.cn/)
+# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -35,14 +27,15 @@ from ..model import Model
class GraphDTA(Model):
"""GraphDTA"""
name = "GraphDTA"
- feature_list = ["x_feature", "x_mask", "edge_feature", "edge_mask", "target_feature", "target_mask", "label",
- "batch_info", "index_all"]
+ feature_list = ["x_feature_batch", "edge_feature_batch", "target_feature_batch", "batch_info"]
def __init__(self, config):
self.config = config
self.use_jit = self.config.use_jit
self.white_list = (nn.Softmax, nn.LayerNorm)
- self.checkpoint_url = self.config.checkpoint_url
+ self.checkpoint_url = \
+ "https://download.mindspore.cn/mindscience/mindsponge/GraphDTA/checkpoint/graphdta_model.ckpt"
+ self.checkpoint_path = "./graphdta_model.ckpt"
self.network = Graphdta(self.config)
if self.config.train:
@@ -55,7 +48,8 @@ class GraphDTA(Model):
else:
self.network.set_train(False)
- super().__init__(self.checkpoint_url, network=self.network, name=self.name, white_list=self.white_list)
+ super().__init__(self.checkpoint_url, self.checkpoint_path, network=self.network,
+ name=self.name, white_list=self.white_list)
@jit
def backward(self, data):
@@ -76,7 +70,7 @@ class GraphDTA(Model):
for key in data:
data[key] = Tensor(data[key])
logits = self.forward(data)
- return logits
+ return logits[0]
def loss(self, data):
pass
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/graphdta/graphdta_configuration.py b/MindSPONGE/mindsponge/python/pipeline/models/graphdta/graphdta_configuration.py
index 900c1d9938f0a269fb3042d383c263a7f7fd19e2..919c189a4e19bab657d28bd4889fb4fe9d34a1be 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/graphdta/graphdta_configuration.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/graphdta/graphdta_configuration.py
@@ -1,4 +1,4 @@
-# Copyright 2023 The AIMM Group at Shenzhen Bay Laboratory & Peking University & Huawei Technologies Co., Ltd
+# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,4 +16,5 @@
graphdta_configuration = {
"train": "https://download.mindspore.cn/mindscience/mindsponge/GraphDTA/config/train.yaml",
+ "inference": "https://download.mindspore.cn/mindscience/mindsponge/GraphDTA/config/inference.yaml",
}
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/graphdta/graphdta_dataset.py b/MindSPONGE/mindsponge/python/pipeline/models/graphdta/graphdta_dataset.py
index 4d4c786a5a486a043f2d1f0128de919a7bcea4d1..dc49f5bae71e2f3aee8d154d00ee636cfcc510bf 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/graphdta/graphdta_dataset.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/graphdta/graphdta_dataset.py
@@ -13,17 +13,17 @@
# limitations under the License.
# ============================================================================
"""graphdta dataset processing script."""
-
-import pickle
import numpy as np
from mindspore.dataset import GeneratorDataset
+from .data_process import generate_feature
class GraphDTADataSet:
"""Class for Generate Dataset."""
def __init__(self, config):
self.batch_size = config.batch_size
+ self.data_path = config.data_path
self.train_data = None
self.train_index = None
self.column_name = ["x_feature", "x_mask", "edge_feature", "edge_mask", "target_feature", "target_mask",
@@ -115,14 +115,14 @@ class GraphDTADataSet:
edge_mask1[0][:edge_num_all] = 1
edge_mask1[1][:edge_num_all] = 1
- new_train_data = {"x_feature_batch": x_1,
- "x_mask_batch": x_mask1,
- "edge_feature_batch": edge_feat1,
- "edge_mask_batch": edge_mask1,
- "target_feature_batch": target_feat1,
- "target_mask_batch": np.zeros((batch_size, 1000)),
- "label_batch": label1,
- "batch_info": batch1,
+ new_train_data = {"x_feature_batch": x_1.astype(np.float32),
+ "x_mask_batch": x_mask1.astype(np.int32),
+ "edge_feature_batch": edge_feat1.astype(np.int32),
+ "edge_mask_batch": edge_mask1.astype(np.int32),
+ "target_feature_batch": target_feat1.astype(np.int32),
+ "target_mask_batch": np.zeros((batch_size, 1000)).astype(np.int32),
+ "label_batch": label1.astype(np.float32),
+ "batch_info": batch1.astype(np.int32),
}
return new_train_data
@@ -130,9 +130,8 @@ class GraphDTADataSet:
"""set training data src"""
if data_src is None:
raise FileNotFoundError
- with open(data_src, "rb") as f:
- input_data = pickle.load(f)
- self.train_data = input_data
+ self.data_path = data_src
+ self.train_data = self.raw_feature()
def create_iterator(self, num_epochs):
"""create data iterator"""
@@ -146,3 +145,14 @@ class GraphDTADataSet:
num_parallel_workers=4, shuffle=True, max_rowsize=16)
iteration = dataset.create_dict_iterator(num_epochs=1, output_numpy=True)
return iteration
+
+ def raw_feature(self):
+ feature = generate_feature(self.data_path)
+ return feature
+
+ def process(self, data_path):
+ self.data_path = data_path
+ raw_feature = self.raw_feature()
+        index_all = [0]  # for inference, process one sample at a time
+ feature_dict = self.process_data(raw_feature, 1, index_all)
+ return feature_dict
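The explicit astype() calls above pin every feature to the dtype the network expects before the batch is handed over as Tensors. A small, hypothetical helper expressing the same rule:

import numpy as np

# Target dtypes per feature column: float for dense features and labels,
# int for indices and masks, mirroring the casts added above.
FEATURE_DTYPES = {
    "x_feature_batch": np.float32,
    "x_mask_batch": np.int32,
    "edge_feature_batch": np.int32,
    "edge_mask_batch": np.int32,
    "target_feature_batch": np.int32,
    "target_mask_batch": np.int32,
    "label_batch": np.float32,
    "batch_info": np.int32,
}

def cast_features(batch):
    """Return a copy of the batch with every array cast to its expected dtype."""
    return {name: np.asarray(value).astype(FEATURE_DTYPES.get(name, np.float32))
            for name, value in batch.items()}

sample = {"x_feature_batch": np.ones((4, 78)), "batch_info": np.zeros(4)}
print({k: v.dtype for k, v in cast_features(sample).items()})
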
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/graphdta/nn_arch.py b/MindSPONGE/mindsponge/python/pipeline/models/graphdta/nn_arch.py
index 295cb194236c719c3d6bad06ccbb6f929da07809..e5c69a08591a272b4ec1bcc6d0dd07a5ce525d0f 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/graphdta/nn_arch.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/graphdta/nn_arch.py
@@ -57,7 +57,7 @@ class Graphdta(nn.Cell):
self.fc1 = nn.Dense(2*output_dim, 1024)
self.fc2 = nn.Dense(1024, 512)
self.out = nn.Dense(512, n_output)
- self.graph_mask = ms.Tensor(np.ones(512), ms.int32)
+ self.graph_mask = ms.Tensor(np.ones(batch_size), ms.int32)
self.max_pooling = MaxPooling()
self.cat = ops.Concat(axis=1)
self.loss = nn.MSELoss()
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/grover/__init__.py b/MindSPONGE/mindsponge/python/pipeline/models/grover/__init__.py
index 86a4a29e559749172dae2227b2b61b6c925adf0e..25bec70e15dbf80cb601873129c6813f3ecb1100 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/grover/__init__.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/grover/__init__.py
@@ -1,29 +1,21 @@
-# Copyright 2023 @ Shenzhen Bay Laboratory &
-# Peking University &
-# Huawei Technologies Co., Ltd
-#
-# This code is a part of MindSPONGE:
-# MindSpore Simulation Package tOwards Next Generation molecular modelling.
-#
-# MindSPONGE is open-source software based on the AI-framework:
-# MindSpore (https://www.mindspore.cn/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""grovers"""
-from .grover import Grover
-from .grover_dataset import GroverDataSet
-from .grover_configuration import grover_configuration
-from .split_data import SplitData
-from .save_features import SaveFeatures
-from .build_vocab import BuildVocab
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""grover"""
+from .grover import Grover
+from .grover_dataset import GroverDataSet
+from .grover_configuration import grover_configuration
+from .split_data import SplitData
+from .save_features import SaveFeatures
+from .build_vocab import BuildVocab
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/grover/build_vocab.py b/MindSPONGE/mindsponge/python/pipeline/models/grover/build_vocab.py
index d2b3cdbb02f18fa53e1c59f53880e74020f1d35d..b56e980cacbf2cd197a6b7dba989a3d99d9ba9eb 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/grover/build_vocab.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/grover/build_vocab.py
@@ -1,42 +1,44 @@
-# Copyright 2022 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""
-The vocabulary building scripts.
-"""
-import os
-from .src.data.mindsporevocab import MolVocab
-
-
-class BuildVocab:
- """BuildVocab"""
- def __init__(self):
- pass
-
- def build_vocab(self, data_path, vocab_save_folder, dataset_name, vocab_min_freq):
- """
- Build vocab(atom/bond) for unlabelled data training.
- """
- for vocab_type in ['atom', 'bond']:
- vocab_file = f"{vocab_type}_vocab.pkl"
- if dataset_name is not None:
- vocab_file = dataset_name + '_' + vocab_file
- vocab_save_path = os.path.join(vocab_save_folder, vocab_file)
- os.makedirs(os.path.dirname(vocab_save_path), exist_ok=True)
- vocab = MolVocab(file_path=data_path,
- min_freq=vocab_min_freq,
- num_workers=1,
- vocab_type=vocab_type)
- print(f"{vocab_type} vocab size", len(vocab))
- vocab.save_vocab(vocab_save_path)
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+The vocabulary building scripts.
+"""
+import os
+from .src.data.mindsporevocab import MolVocab
+
+
+class BuildVocab:
+ """BuildVocab"""
+ def __init__(self):
+ pass
+
+ def build_vocab(self, data_path, vocab_save_folder, dataset_name, vocab_min_freq):
+ """
+ Build vocab(atom/bond) for unlabelled data training.
+ """
+ for vocab_type in ['atom', 'bond']:
+ vocab_file = f"{vocab_type}_vocab.pkl"
+ if dataset_name is not None:
+ vocab_file = dataset_name + '_' + vocab_file
+ vocab_save_path = os.path.join(vocab_save_folder, vocab_file)
+ if os.path.exists(vocab_save_path):
+ continue
+ os.makedirs(os.path.dirname(vocab_save_path), exist_ok=True)
+ vocab = MolVocab(file_path=data_path,
+ min_freq=vocab_min_freq,
+ num_workers=1,
+ vocab_type=vocab_type)
+ print(f"{vocab_type} vocab size", len(vocab))
+ vocab.save_vocab(vocab_save_path)
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/grover/grover.py b/MindSPONGE/mindsponge/python/pipeline/models/grover/grover.py
index 741d87210eb213e50ce8aa45d91662277c473f6e..9b3299a9acd5dde3abd38a32f82c1c439b020187 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/grover/grover.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/grover/grover.py
@@ -1,338 +1,232 @@
-# Copyright 2022 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""grover"""
-import time
-import mindspore as ms
-from mindspore import jit, nn
-from mindspore.common import mutable
-from mindspore.communication.management import init
-from ..model import Model
-from .nn_arch import GROVEREmbedding, GroverFinetuneTask, GroverFpGenerationTask, GroverPretrainTask
-from .src.util.scheduler import get_lr
-from .src.model_utils.local_adapter import get_device_id, get_device_num, get_rank_id
-
-
-def load_parameters(network, file_name):
- """
- Load parameters for evaluating.
- """
- param_dict = ms.load_checkpoint(file_name)
- param_dict_new = {}
- filter_key = {}
- for key, values in param_dict.items():
- if key.startswith('grover.') or key.startswith('mol'):
- if key in filter_key:
- continue
- param_dict_new[key] = values
- else:
- continue
- ms.load_param_into_net(network, param_dict_new)
-
-
-def load_convert_params(args, network):
- """
- Load pretrained model parameters for finetuning.
- """
- if args.resume_grover:
- param_dict = ms.load_checkpoint(args.resume_grover)
- param_dict_new = {}
- for key, values in param_dict.items():
- param_dict_new[key] = values
- ms.load_param_into_net(network, param_dict_new)
-
-
-def eval_context_init(args):
- """
- Init context.
- """
- device_id = get_device_id()
- ms.set_context(mode=ms.GRAPH_MODE, device_target=args.device_target, save_graphs=False, device_id=device_id)
-
- ms.reset_auto_parallel_context()
- if args.run_distribute:
- init()
- args.device_num = get_device_num()
- args.rank = get_rank_id()
- parallel_mode = ms.ParallelMode.DATA_PARALLEL
-
- else:
- args.device_num = 1
- args.rank = 0
- parallel_mode = ms.ParallelMode.STAND_ALONE
-
- ms.set_auto_parallel_context(device_num=args.device_num,
- parallel_mode=parallel_mode,
- gradients_mean=True)
-
-
-def gen_context_init(args):
- """
- Init Context.
- """
- device_id = get_device_id()
- ms.set_context(mode=ms.GRAPH_MODE, device_target=args.device_target, save_graphs=False, device_id=device_id)
-
- if ms.get_context("device_target") == "Ascend":
- ms.set_context(max_device_memory="10GB")
-
- ms.reset_auto_parallel_context()
- if args.run_distribute:
- init()
- args.device_num = get_device_num()
- args.rank = get_rank_id()
- parallel_mode = ms.ParallelMode.DATA_PARALLEL
-
- else:
- args.device_num = 1
- args.rank = 0
- parallel_mode = ms.ParallelMode.STAND_ALONE
-
- ms.set_auto_parallel_context(device_num=args.device_num,
- parallel_mode=parallel_mode,
- gradients_mean=True)
-
- args.rank_save_ckpt_flag = 0
- if args.is_save_on_master:
- if args.rank == 0:
- args.rank_save_ckpt_flag = 1
- else:
- args.rank_save_ckpt_flag = 1
-
-
-def pretrain_context_init(args):
- """
- Init context.
- """
- device_id = get_device_id()
- ms.set_context(mode=ms.GRAPH_MODE, device_target=args.device_target, save_graphs=False, device_id=device_id)
-
- if ms.get_context("device_target") == "Ascend":
- ms.set_context(max_device_memory="10GB")
-
- ms.reset_auto_parallel_context()
- print(args.run_distribute)
- if args.run_distribute:
- init()
- args.device_num = get_device_num()
- args.rank = get_rank_id()
- parallel_mode = ms.ParallelMode.DATA_PARALLEL
-
- else:
- args.device_num = 1
- args.rank = 0
- parallel_mode = ms.ParallelMode.STAND_ALONE
-
- ms.set_auto_parallel_context(device_num=args.device_num,
- parallel_mode=parallel_mode,
- gradients_mean=True)
- args.rank_save_ckpt_flag = 0
- if args.is_save_on_master:
- if args.rank == 0:
- args.rank_save_ckpt_flag = 1
- else:
- args.rank_save_ckpt_flag = 1
-
-
-def train_context_init(args):
- """
- Init context.
- """
- device_id = get_device_id()
- ms.set_context(mode=ms.GRAPH_MODE, device_target=args.device_target, save_graphs=False, device_id=device_id)
-
- if ms.get_context("device_target") == "Ascend":
- ms.set_context(max_device_memory="10GB")
-
- ms.reset_auto_parallel_context()
- if args.run_distribute:
- init()
- args.device_num = get_device_num()
- args.rank = get_rank_id()
- parallel_mode = ms.ParallelMode.DATA_PARALLEL
-
- else:
- args.device_num = 1
- args.rank = 0
- parallel_mode = ms.ParallelMode.STAND_ALONE
-
- ms.set_auto_parallel_context(device_num=args.device_num,
- parallel_mode=parallel_mode,
- gradients_mean=True)
-
- args.rank_save_ckpt_flag = 0
- if args.is_save_on_master:
- if args.rank == 0:
- args.rank_save_ckpt_flag = 1
- else:
- args.rank_save_ckpt_flag = 1
-
-
-class Grover(Model):
- """Grover"""
- name = "Grover"
-
- def __init__(self, config, **kwargs):
- self.config = config
- self.use_jit = self.config.use_jit
- self.checkpoint_url = 'https://download.mindspore.cn/mindscience/mindsponge/grover/checkpoint/grover.ckpt'
- self.checkpoint_path = "./grover.ckpt"
- if self.config.parser_name == "eval":
- eval_context_init(config)
- config.is_training = False
- config.features_dim = kwargs['features_dim']
- config.output_size = kwargs['output_size']
- grover_model = GROVEREmbedding(config)
- network = GroverFinetuneTask(config, grover_model, is_training=config.is_training)
- load_parameters(network, config.pretrained)
- network.set_train(False)
- elif self.config.parser_name == "gen":
- gen_context_init(config)
- config.is_training = False
- grover_model = GROVEREmbedding(config)
- load_convert_params(config, grover_model)
- network = GroverFpGenerationTask(config, grover_model)
- network.set_train(False)
- elif self.config.parser_name == "pretrain":
- pretrain_context_init(config)
- config.is_training = True
- grover_model = GROVEREmbedding(config)
- network = GroverPretrainTask(config, grover_model,
- atom_vocab_size=kwargs['atom_vocab_size'],
- bond_vocab_size=kwargs['bond_vocab_size'],
- fg_size=kwargs['fg_size'])
- config.steps_per_epoch = kwargs['steps_per_epoch']
- lr = get_lr(config)
- opt = nn.Adam(network.trainable_params(), learning_rate=ms.Tensor(lr), weight_decay=config.weight_decay)
- if config.mixed:
- loss_scale_manager = ms.FixedLossScaleManager(config.loss_scale_value, drop_overflow_update=False)
- network = ms.build_train_network(network, optimizer=opt, loss_scale_manager=loss_scale_manager,
- level="O2", keep_batchnorm_fp32=False)
- for _, cell in network.cells_and_names():
- if isinstance(cell, (GroverPretrainLossBlock, nn.Softmax, nn.LayerNorm, SelectIndex)):
- cell.to_float(ms.float32)
-
- else:
- network = nn.TrainOneStepCell(network=network, optimizer=opt)
- network.set_train(True)
- else:
- train_context_init(config)
- config.is_training = True
- config.features_dim = kwargs['features_dim']
- config.output_size = kwargs['output_size']
- grover_model = GROVEREmbedding(config)
- load_convert_params(config, grover_model)
- network = GroverFinetuneTask(config, grover_model, is_training=config.is_training)
- config.steps_per_epoch = kwargs['steps_per_epoch']
- lr = get_lr(config)
- opt = nn.Adam(network.trainable_params(), learning_rate=ms.Tensor(lr), weight_decay=config.weight_decay)
- if config.mixed:
- loss_scale_manager = ms.FixedLossScaleManager(config.loss_scale_value, drop_overflow_update=False)
- network = ms.build_train_network(network, optimizer=opt, loss_scale_manager=loss_scale_manager,
- level="O2", keep_batchnorm_fp32=False)
-
- for _, cell in network.cells_and_names():
- if isinstance(cell, (GroverFinetuneLossBlock, nn.Softmax, nn.LayerNorm, SelectIndex)):
- cell.to_float(ms.float32)
- else:
- network = nn.TrainOneStepCell(network=network, optimizer=opt)
- network.set_train(True)
- self.network = network
- super().__init__(self.checkpoint_url, self.network, self.name)
-
- # pylint: disable=arguments-differ
- def forward(self, input_graph, scope, features_batch):
- if self.use_jit:
- # pylint: disable=arguments-differ
- preds = self._jit_forward(input_graph, scope, features_batch)
- else:
- preds = self._pynative_forward(input_graph, scope, features_batch)
- return preds
-
- def predict(self, data, **kwargs):
- preds = None
- if self.config.parser_name == "eval":
- features_batch = data["features"]
- f_atoms = data["f_atoms"]
- f_bonds = data["f_bonds"]
- a2b = data["a2b"]
- b2a = data["b2a"]
- b2revb = data["b2revb"]
- a2a = data["a2a"]
- a_scope = data["a_scope"].asnumpy().tolist()
- b_scope = data["b_scope"].asnumpy().tolist()
- scope = (a_scope, b_scope)
- input_graph = (f_atoms, f_bonds, a2b, b2a, b2revb, a2a)
- t1 = time.time()
- preds = self.forward(input_graph, scope, features_batch)
- t2 = time.time()
- print(round(t2 - t1))
- else:
- features_batch = data["features"]
- a_scope = data["a_scope"].asnumpy().tolist()
- b_scope = data["b_scope"].asnumpy().tolist()
- scope = (a_scope, b_scope)
- input_graph = (data["f_atoms"], data["f_bonds"], data["a2b"], data["b2a"], data["b2revb"], data["a2a"])
- t1 = time.time()
- preds = self.forward(input_graph, scope, features_batch)
- t2 = time.time()
- print(round(t2 - t1))
- return preds
-
- def loss(self, data):
- pass
-
- def grad_operations(self, gradient):
- pass
-
- @jit
- def backward(self, data):
- loss = self.network(*data)
- return loss
-
- def train_step(self, data):
- if self.config.parser_name == "pretrain":
- a_scope = data["a_scope"].asnumpy().tolist()
- b_scope = data["b_scope"].asnumpy().tolist()
- scope = (a_scope, b_scope)
- input_graph = (data["f_atoms"], data["f_bonds"], data["a2b"], data["b2a"], data["b2revb"], data["a2a"])
- input_graph = mutable(input_graph)
- targets = (data["atom_vocab_label"], data["bond_vocab_label"], data["fgroup_label"])
- targets = mutable(targets)
- feat = (input_graph, scope, targets)
- else:
- features_batch = data["features"]
- targets = data["labels"]
- a_scope = data["a_scope"].asnumpy().tolist()
- b_scope = data["b_scope"].asnumpy().tolist()
- scope = (a_scope, b_scope)
- input_graph = (data["f_atoms"], data["f_bonds"], data["a2b"], data["b2a"], data["b2revb"], data["a2a"])
- input_graph = mutable(input_graph)
- feat = (input_graph, scope, features_batch, targets)
- t1 = time.time()
- loss = self.backward(feat)
- t2 = time.time()
- print("backward time : ", round(t2 - t1, 2))
- return loss
-
- # pylint: disable=arguments-differ
- def _pynative_forward(self, input_graph, scope, features_batch):
- preds = self.network(input_graph, scope, features_batch)
- return preds
-
- # pylint: disable=arguments-differ
- @jit
- def _jit_forward(self, data, scope, features_batch):
- preds = self.network(data, scope, features_batch)
- return preds
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""grover"""
+import mindspore as ms
+from mindspore import jit, nn
+from mindspore.common import mutable
+from ..model import Model
+from .nn_arch import GROVEREmbedding, GroverFinetuneTask, GroverFpGenerationTask, GroverPretrainTask
+from .src.util.scheduler import get_lr
+
+
+def load_parameters(network, file_name):
+ """
+ Load parameters for evaluating.
+ """
+ param_dict = ms.load_checkpoint(file_name)
+ param_dict_new = {}
+ filter_key = {}
+ for key, values in param_dict.items():
+ if key.startswith('grover.') or key.startswith('mol'):
+ if key in filter_key:
+ continue
+ param_dict_new[key] = values
+ else:
+ continue
+ ms.load_param_into_net(network, param_dict_new)
+
+
+def load_convert_params(args, network):
+ """
+ Load pretrained model parameters for finetuning.
+ """
+ if args.resume_grover:
+ param_dict = ms.load_checkpoint(args.resume_grover)
+ param_dict_new = {}
+ for key, values in param_dict.items():
+ param_dict_new[key] = values
+ ms.load_param_into_net(network, param_dict_new)
+
+
+def eval_context_init(args):
+ """
+ Init context.
+ """
+ ms.set_context(device_target=args.device_target, save_graphs=False)
+
+
+def gen_context_init(args):
+ """
+ Init Context.
+ """
+ ms.set_context(device_target=args.device_target, save_graphs=False)
+
+ if ms.get_context("device_target") == "Ascend":
+ ms.set_context(max_device_memory="10GB")
+
+
+def pretrain_context_init(args):
+ """
+ Init context.
+ """
+ ms.set_context(device_target=args.device_target, save_graphs=False)
+
+ if ms.get_context("device_target") == "Ascend":
+ ms.set_context(max_device_memory="10GB")
+
+
+def train_context_init(args):
+ """
+ Init context.
+ """
+ ms.set_context(device_target=args.device_target, save_graphs=False)
+
+ if ms.get_context("device_target") == "Ascend":
+ ms.set_context(max_device_memory="10GB")
+
+
+class Grover(Model):
+ """Grover"""
+ name = "Grover"
+
+ def __init__(self, config, **kwargs):
+ self.config = config
+ self.use_jit = self.config.use_jit
+ self.checkpoint_url = 'https://download.mindspore.cn/mindscience/mindsponge/grover/checkpoint/grover.ckpt'
+ self.checkpoint_path = "./grover.ckpt"
+ if self.config.parser_name == "eval":
+ eval_context_init(config)
+ config.is_training = False
+ grover_model = GROVEREmbedding(config)
+ network = GroverFinetuneTask(config, grover_model, is_training=config.is_training)
+ network.set_train(False)
+ elif self.config.parser_name == "gen":
+ gen_context_init(config)
+ config.is_training = False
+ grover_model = GROVEREmbedding(config)
+ network = GroverFpGenerationTask(config, grover_model)
+ network.set_train(False)
+ elif self.config.parser_name == "pretrain":
+ pretrain_context_init(config)
+ config.is_training = True
+ grover_model = GROVEREmbedding(config)
+ network = GroverPretrainTask(config, grover_model,
+ atom_vocab_size=kwargs['atom_vocab_size'],
+ bond_vocab_size=kwargs['bond_vocab_size'],
+ fg_size=kwargs['fg_size'])
+ config.steps_per_epoch = kwargs['steps_per_epoch']
+ lr = get_lr(config)
+ opt = nn.Adam(network.trainable_params(), learning_rate=ms.Tensor(lr), weight_decay=config.weight_decay)
+ if config.mixed:
+ loss_scale_manager = ms.FixedLossScaleManager(config.loss_scale_value, drop_overflow_update=False)
+ network = ms.build_train_network(network, optimizer=opt, loss_scale_manager=loss_scale_manager,
+ level="O2", keep_batchnorm_fp32=False)
+ for _, cell in network.cells_and_names():
+ if isinstance(cell, (GroverPretrainLossBlock, nn.Softmax, nn.LayerNorm, SelectIndex)):
+ cell.to_float(ms.float32)
+
+ else:
+ network = nn.TrainOneStepCell(network=network, optimizer=opt)
+ network.set_train(True)
+ else:
+ train_context_init(config)
+ config.is_training = True
+ config.features_dim = kwargs['features_dim']
+ config.output_size = kwargs['output_size']
+ grover_model = GROVEREmbedding(config)
+ network = GroverFinetuneTask(config, grover_model, is_training=config.is_training)
+ config.steps_per_epoch = kwargs['steps_per_epoch']
+ lr = get_lr(config)
+ opt = nn.Adam(network.trainable_params(), learning_rate=ms.Tensor(lr), weight_decay=config.weight_decay)
+ if config.mixed:
+ loss_scale_manager = ms.FixedLossScaleManager(config.loss_scale_value, drop_overflow_update=False)
+ network = ms.build_train_network(network, optimizer=opt, loss_scale_manager=loss_scale_manager,
+ level="O2", keep_batchnorm_fp32=False)
+
+ for _, cell in network.cells_and_names():
+ if isinstance(cell, (GroverFinetuneLossBlock, nn.Softmax, nn.LayerNorm, SelectIndex)):
+ cell.to_float(ms.float32)
+ else:
+ network = nn.TrainOneStepCell(network=network, optimizer=opt)
+ network.set_train(True)
+ self.network = network
+ super().__init__(self.checkpoint_url, self.checkpoint_path, self.network, self.name)
+
+ # pylint: disable=arguments-differ
+ def forward(self, input_graph, scope, features_batch):
+ if self.use_jit:
+ # pylint: disable=arguments-differ
+ preds = self._jit_forward(input_graph, scope, features_batch)
+ else:
+ preds = self._pynative_forward(input_graph, scope, features_batch)
+ return preds
+
+ def predict(self, data, **kwargs):
+ preds = None
+ if self.config.parser_name == "eval":
+ features_batch = data["features"]
+ f_atoms = data["f_atoms"]
+ f_bonds = data["f_bonds"]
+ a2b = data["a2b"]
+ b2a = data["b2a"]
+ b2revb = data["b2revb"]
+ a2a = data["a2a"]
+ a_scope = data["a_scope"].asnumpy().tolist()
+ b_scope = data["b_scope"].asnumpy().tolist()
+ scope = (a_scope, b_scope)
+ input_graph = (f_atoms, f_bonds, a2b, b2a, b2revb, a2a)
+ preds = self.forward(input_graph, scope, features_batch)
+ else:
+ features_batch = data["features"]
+ a_scope = data["a_scope"].asnumpy().tolist()
+ b_scope = data["b_scope"].asnumpy().tolist()
+ scope = (a_scope, b_scope)
+ input_graph = (data["f_atoms"], data["f_bonds"], data["a2b"], data["b2a"], data["b2revb"], data["a2a"])
+ preds = self.forward(input_graph, scope, features_batch)
+ return preds
+
+ def loss(self, data):
+ pass
+
+ def grad_operations(self, gradient):
+ pass
+
+ @jit
+ def backward(self, data):
+ loss = self.network(*data)
+ return loss
+
+ def train_step(self, data):
+ if self.config.parser_name == "pretrain":
+ a_scope = data["a_scope"].asnumpy().tolist()
+ b_scope = data["b_scope"].asnumpy().tolist()
+ scope = (a_scope, b_scope)
+ input_graph = (data["f_atoms"], data["f_bonds"], data["a2b"], data["b2a"], data["b2revb"], data["a2a"])
+ input_graph = mutable(input_graph)
+ targets = (data["atom_vocab_label"], data["bond_vocab_label"], data["fgroup_label"])
+ targets = mutable(targets)
+ feat = (input_graph, scope, targets)
+ else:
+ features_batch = data["features"]
+ targets = data["labels"]
+ a_scope = data["a_scope"].asnumpy().tolist()
+ b_scope = data["b_scope"].asnumpy().tolist()
+ scope = (a_scope, b_scope)
+ input_graph = (data["f_atoms"], data["f_bonds"], data["a2b"], data["b2a"], data["b2revb"], data["a2a"])
+ input_graph = mutable(input_graph)
+ feat = (input_graph, scope, features_batch, targets)
+ loss = self.backward(feat)
+ return loss
+
+ # pylint: disable=arguments-differ
+ def _pynative_forward(self, input_graph, scope, features_batch):
+ preds = self.network(input_graph, scope, features_batch)
+ return preds
+
+ # pylint: disable=arguments-differ
+ @jit
+ def _jit_forward(self, data, scope, features_batch):
+ preds = self.network(data, scope, features_batch)
+ return preds
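When config.mixed is set, both branches above wrap the task network with ms.build_train_network at level O2 and then pin numerically sensitive cells back to float32. A minimal sketch of that fix-up in isolation (the cell tuple here is trimmed to standard MindSpore cells; the real code also covers the GROVER-specific loss blocks):

import mindspore as ms
from mindspore import nn

def keep_sensitive_cells_fp32(network, sensitive=(nn.Softmax, nn.LayerNorm)):
    """Walk the cell tree and force selected cell types to run in float32."""
    for _, cell in network.cells_and_names():
        if isinstance(cell, sensitive):
            cell.to_float(ms.float32)
    return network

# Usage: keep_sensitive_cells_fp32(train_network) after ms.build_train_network(...).
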
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/grover/grover_configuration.py b/MindSPONGE/mindsponge/python/pipeline/models/grover/grover_configuration.py
index 6b56ea14a86ce7d8b021b9047daafd01710194ab..9d374463c27ea780c7987df8f4c257375aff53d4 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/grover/grover_configuration.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/grover/grover_configuration.py
@@ -1,4 +1,4 @@
-# Copyright 2022 Huawei Technologies Co., Ltd
+# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/grover/grover_dataset.py b/MindSPONGE/mindsponge/python/pipeline/models/grover/grover_dataset.py
index c37111254da1775216cc09a0cb806c75e7cd38be..48885b3366d551d69319cfad412d43d20bb488b3 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/grover/grover_dataset.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/grover/grover_dataset.py
@@ -1,210 +1,242 @@
-# Copyright 2022 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""grover_dataset"""
-import os
-import multiprocessing
-import numpy as np
-from mindspore import dataset as ds
-from .src.util.utils import load_features, load_smiles_labels
-from .src.data.mindsporevocab import MolVocab
-from .src.data.transforms import MolCollator, GroverCollator, normalize_data
-from ...dataset import DataSet
-
-
-def get_smiles_labels(args, smiles_file, is_training, mode):
- """
- Load and Process smiles and labels.
- """
- smiles_list, labels_list = load_smiles_labels(smiles_file)
-
- if mode == "finetune" and args.dataset_type == "regression":
- labels_scaler_path = os.path.join(args.scaler_path, "labels_scaler.ckpt")
- labels_list, labels_scaler = normalize_data(labels_list, replace_nan_token=None, is_training=is_training,
- path=labels_scaler_path)
- else:
- labels_scaler = None
-
- return smiles_list, labels_list, labels_scaler
-
-
-def get_features(args, features_file, is_training, mode):
- """
- Load and Process features.
- """
- lines = load_features(features_file)
- features_list = []
- for features in lines:
- if features is not None:
- # Fix nans in features
- replace_token = 0
- features = np.where(np.isnan(features), replace_token, features).astype(np.float32)
- features_list.append(features)
-
- # normalize features
- if mode == "finetune" and args.features_scaling:
- features_scaler_path = os.path.join(args.scaler_path, "features_scaler.ckpt")
- features_list, features_scaler = normalize_data(features_list, replace_nan_token=0, is_training=is_training,
- path=features_scaler_path)
- else:
- features_scaler = None
-
- return features_list, features_scaler
-
-
-class GroverDataSet(DataSet):
- """
- GROVER Dataset.
- """
- def __init__(self, config):
- self.config = config
- self.in_memory = False
- self.is_training = False
- self.mode = None
- self.smiles_path = None
- self.feature_path = None
- self.smiles_list = None
- self.labels_list = None
- self.labels_scaler = None
- self.features_list = None
- self.features_scaler = None
- self.atom_vocab_path = None
- self.bond_vocab_path = None
- super().__init__()
-
- def __getitem__(self, idx):
- smiles = self.smiles_list[idx]
- features = self.features_list[idx]
- labels = self.labels_list[idx]
- return smiles, features, labels
-
- def __len__(self):
- assert len(self.smiles_list) == len(self.features_list)
- return len(self.smiles_list)
-
- def get_features_dim(self):
- features_dim = len(self.features_list[0]) if self.features_list[0] is not None else 0
- return features_dim
-
- def get_num_tasks(self):
- num_tasks = len(self.labels_list[0]) if self.labels_list[0] is not None else 0
- return num_tasks
-
- def process(self, data, **kwargs):
- return data
-
- def download(self, path=None):
- pass
-
- def data_parse(self, idx):
- pass
-
- def set_training_data_src(self, data_src):
- """set_training_data_src"""
- if self.config.parser_name == "eval":
- self.smiles_path = os.path.join(data_src, "bbbp_val.csv")
- self.feature_path = os.path.join(data_src, "bbbp_val.npz")
- self.config.scaler_path = os.path.join(data_src, "bbbp_scaler")
- self.is_training = False
- self.mode = "finetune"
- elif self.config.parser_name == "gen":
- self.smiles_path = os.path.join(data_src, "bbbp_val.csv")
- self.feature_path = os.path.join(data_src, "bbbp_val.npz")
- self.is_training = False
- self.mode = "finetune"
- elif self.config.parser_name == "pretrain":
- self.smiles_path = os.path.join(data_src, "tryout_train.csv")
- self.feature_path = os.path.join(data_src, "tryout_train.npz")
- self.atom_vocab_path = os.path.join(data_src, "tryout_atom_vocab.pkl")
- self.bond_vocab_path = os.path.join(data_src, "tryout_bond_vocab.pkl")
- self.mode = "pretrain"
- self.is_training = True
- else:
- self.smiles_path = os.path.join(data_src, "bbbp_train.csv")
- self.feature_path = os.path.join(data_src, "bbbp_train.npz")
- self.config.scaler_path = os.path.join(data_src, "bbbp_scaler")
- self.mode = "finetune"
- if not os.path.exists(self.config.scaler_path):
- os.makedirs(self.config.scaler_path)
- self.is_training = True
- self.smiles_list, self.labels_list, self.labels_scaler = get_smiles_labels(self.config, self.smiles_path,
- self.is_training, self.mode)
- self.features_list, self.features_scaler = get_features(self.config, self.feature_path, self.is_training,
- self.mode)
-
- def create_iterator(self, num_epochs, **kwargs):
- if self.config.parser_name == "pretrain":
- dataset = self.create_pretrain_dataset()
- iteration = dataset.create_dict_iterator(output_numpy=False)
- else:
- dataset = self.create_grover_dataset()
- iteration = dataset.create_dict_iterator(output_numpy=False)
-
- return iteration
-
- def create_pretrain_dataset(self):
- """
- Create dataset for pretrain model.
- """
- cores = multiprocessing.cpu_count()
- num_parallel_workers = int(cores / self.config.device_num)
-
- # load atom and bond vocabulary and the semantic motif labels.
- atom_vocab = MolVocab.load_vocab(self.atom_vocab_path)
- bond_vocab = MolVocab.load_vocab(self.bond_vocab_path)
- self.config.atom_vocab_size, self.config.bond_vocab_size = len(atom_vocab), len(bond_vocab)
- self.config.fg_size = 85
-
- mol_collator = GroverCollator(shared_dict={}, atom_vocab=atom_vocab, bond_vocab=bond_vocab, args=self.config)
- per_batch_match_op = mol_collator.per_batch_map
-
- dataset_column_names = ["smiles", "features", "none"]
- output_columns = ["f_atoms", "f_bonds", "a2b", "b2a", "b2revb", "a2a", "a_scope", "b_scope", "atom_vocab_label",
- "bond_vocab_label", "fgroup_label"]
- dataset = ds.GeneratorDataset(self, column_names=dataset_column_names,
- shuffle=False, num_shards=self.config.device_num, shard_id=self.config.rank)
- dataset = dataset.batch(batch_size=self.config.batch_size, num_parallel_workers=min(8, num_parallel_workers))
- dataset = dataset.map(operations=per_batch_match_op, input_columns=["smiles", "features"],
- output_columns=output_columns,
- num_parallel_workers=min(8, num_parallel_workers))
- dataset = dataset.project(output_columns)
- return dataset
-
- def create_grover_dataset(self):
- """
- Create dataset for train/eval model.
- """
- labels_scaler = self.labels_scaler
- self.config.num_tasks = self.get_num_tasks()
- self.config.output_size = self.config.num_tasks
- self.config.features_dim = self.get_features_dim()
-
- cores = multiprocessing.cpu_count()
- num_parallel_workers = int(cores / self.config.device_num)
-
- mol_collator = MolCollator({}, self.config)
- per_batch_match_op = mol_collator.per_batch_map
-
- dataset_column_names = ["smiles", "features", "labels"]
- output_columns = ["f_atoms", "f_bonds", "a2b", "b2a", "b2revb", "a2a", "a_scope", "b_scope", "smiles"]
- columns = ["f_atoms", "f_bonds", "a2b", "b2a", "b2revb", "a2a", "a_scope", "b_scope", "smiles", "features",
- "labels"]
- dataset = ds.GeneratorDataset(self, column_names=dataset_column_names,
- shuffle=False, num_shards=self.config.device_num, shard_id=self.config.rank)
- dataset = dataset.batch(batch_size=self.config.batch_size, num_parallel_workers=min(8, num_parallel_workers))
- dataset = dataset.map(operations=per_batch_match_op, input_columns=["smiles"], output_columns=output_columns,
- num_parallel_workers=min(8, num_parallel_workers))
- dataset = dataset.project(columns)
- self.config.labels_scaler = labels_scaler
- return dataset
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""grover_dataset"""
+import os
+import multiprocessing
+import numpy as np
+from mindspore import dataset as ds
+from .split_data import SplitData
+from .save_features import SaveFeatures
+from .build_vocab import BuildVocab
+from .src.util.utils import load_features, load_smiles_labels
+from .src.data.mindsporevocab import MolVocab
+from .src.data.transforms import MolCollator, GroverCollator, normalize_data
+from ...dataset import DataSet
+
+
+
+def get_smiles_labels(args, smiles_file, is_training, mode):
+ """
+ Load and Process smiles and labels.
+ """
+ smiles_list, labels_list = load_smiles_labels(smiles_file)
+
+ if mode == "finetune" and args.dataset_type == "regression":
+ labels_scaler_path = os.path.join(args.scaler_path, "labels_scaler.ckpt")
+ labels_list, labels_scaler = normalize_data(labels_list, replace_nan_token=None, is_training=is_training,
+ path=labels_scaler_path)
+ else:
+ labels_scaler = None
+
+ return smiles_list, labels_list, labels_scaler
+
+
+def get_features(args, features_file, is_training, mode):
+ """
+ Load and Process features.
+ """
+ lines = load_features(features_file)
+ features_list = []
+ for features in lines:
+ if features is not None:
+ # Fix nans in features
+ replace_token = 0
+ features = np.where(np.isnan(features), replace_token, features).astype(np.float32)
+ features_list.append(features)
+
+ # normalize features
+ if mode == "finetune" and args.features_scaling:
+ features_scaler_path = os.path.join(args.scaler_path, "features_scaler.ckpt")
+ features_list, features_scaler = normalize_data(features_list, replace_nan_token=0, is_training=is_training,
+ path=features_scaler_path)
+ else:
+ features_scaler = None
+
+ return features_list, features_scaler
+
+
+class GroverDataSet(DataSet):
+ """
+ GROVER Dataset.
+ """
+ def __init__(self, config):
+ self.config = config
+ self.in_memory = False
+ self.is_training = False
+ self.mode = None
+ self.smiles_path = None
+ self.feature_path = None
+ self.smiles_list = None
+ self.labels_list = None
+ self.labels_scaler = None
+ self.features_list = None
+ self.features_scaler = None
+ self.atom_vocab_path = None
+ self.bond_vocab_path = None
+ self.sd = SplitData()
+ self.sf = SaveFeatures()
+ self.bv = BuildVocab()
+ super().__init__()
+
+ def __getitem__(self, idx):
+ smiles = self.smiles_list[idx]
+ features = self.features_list[idx]
+ labels = self.labels_list[idx]
+ return smiles, features, labels
+
+ def __len__(self):
+ assert len(self.smiles_list) == len(self.features_list)
+ return len(self.smiles_list)
+
+ def get_features_dim(self):
+ features_dim = len(self.features_list[0]) if self.features_list[0] is not None else 0
+ return features_dim
+
+ def get_num_tasks(self):
+ num_tasks = len(self.labels_list[0]) if self.labels_list[0] is not None else 0
+ return num_tasks
+
+ def process(self, data, **kwargs):
+ if self.config.parser_name == "eval":
+ data_path = data.split('.csv')[0]
+ self.sf.generate_and_save_features(data, 'rdkit_2d_normalized', f"{data_path}.npz", 10000, False, True)
+ self.smiles_path = data
+ self.feature_path = data_path + ".npz"
+ scaler_path = data.split(data.split('/')[-1])[0] + (data.split('/')[-1]).split('_')[0] + "_scaler"
+ self.config.scaler_path = scaler_path
+ self.is_training = False
+ self.mode = "finetune"
+ elif self.config.parser_name == "gen":
+ data_path = data.split('.csv')[0]
+ self.smiles_path = data
+ self.feature_path = data_path + ".npz"
+ self.is_training = False
+ self.mode = "finetune"
+ self.smiles_list, self.labels_list, self.labels_scaler = get_smiles_labels(self.config, self.smiles_path,
+ self.is_training, self.mode)
+ self.features_list, self.features_scaler = get_features(self.config, self.feature_path, self.is_training,
+ self.mode)
+ self.config.batch_size = 1
+ dataset = self.create_grover_dataset()
+ iteration = dataset.create_dict_iterator(output_numpy=False, num_epochs=1)
+ for d in iteration:
+ data = d
+ break
+ return data
+
+ def download(self, path=None):
+ pass
+
+ def data_parse(self, idx):
+ pass
+
+ def set_training_data_src(self, data_src):
+ """set_training_data_src"""
+ if self.config.parser_name == "pretrain":
+ data_path = data_src.split(data_src.split('/')[-1])[0]
+ filename = data_src.split('/')[-1].split('.csv')[0]
+ if not os.path.exists(f"{data_path}/{filename}_train.csv"):
+ self.sd.split_data(data_path, filename)
+ self.sf.generate_and_save_features(f"{data_path}/{filename}_train.csv", 'fgtasklabel',
+ f"{data_path}/{filename}_train.npz", 10000, False, True)
+ self.bv.build_vocab(f"{data_path}/{filename}.csv", data_path, 'tryout', 1)
+ self.smiles_path = os.path.join(data_path, f"{filename}_train.csv")
+ self.feature_path = os.path.join(data_path, f"{filename}_train.npz")
+ self.atom_vocab_path = os.path.join(data_path, f"{filename}_atom_vocab.pkl")
+ self.bond_vocab_path = os.path.join(data_path, f"{filename}_bond_vocab.pkl")
+ self.mode = "pretrain"
+ self.is_training = True
+ else:
+ data_path = data_src.split(data_src.split('/')[-1])[0]
+ filename = data_src.split('/')[-1].split('.csv')[0]
+            if not os.path.exists(f"{data_path}/{filename}_train.csv"):
+ self.sd.split_data(data_path, filename)
+ self.sf.generate_and_save_features(f"{data_path}/{filename}_train.csv", 'rdkit_2d_normalized',
+ f"{data_path}/{filename}_train.npz", 10000, False, True)
+ self.smiles_path = os.path.join(data_path, f"{filename}_train.csv")
+ self.feature_path = os.path.join(data_path, f"{filename}_train.npz")
+ self.config.scaler_path = os.path.join(data_path, f"{filename}_scaler")
+ self.mode = "finetune"
+ if not os.path.exists(self.config.scaler_path):
+ os.makedirs(self.config.scaler_path)
+ self.is_training = True
+ self.smiles_list, self.labels_list, self.labels_scaler = get_smiles_labels(self.config, self.smiles_path,
+ self.is_training, self.mode)
+ self.features_list, self.features_scaler = get_features(self.config, self.feature_path, self.is_training,
+ self.mode)
+
+ def create_iterator(self, num_epochs, **kwargs):
+ if self.config.parser_name == "pretrain":
+ dataset = self.create_pretrain_dataset()
+ else:
+ dataset = self.create_grover_dataset()
+ iteration = dataset.create_dict_iterator(output_numpy=False, num_epochs=num_epochs)
+ return iteration
+
+ def create_pretrain_dataset(self):
+ """
+ Create dataset for pretrain model.
+ """
+ cores = multiprocessing.cpu_count()
+ num_parallel_workers = int(cores / self.config.device_num)
+
+ # load atom and bond vocabulary and the semantic motif labels.
+ atom_vocab = MolVocab.load_vocab(self.atom_vocab_path)
+ bond_vocab = MolVocab.load_vocab(self.bond_vocab_path)
+ self.config.atom_vocab_size, self.config.bond_vocab_size = len(atom_vocab), len(bond_vocab)
+ self.config.fg_size = 85
+
+ mol_collator = GroverCollator(shared_dict={}, atom_vocab=atom_vocab, bond_vocab=bond_vocab, args=self.config)
+ per_batch_match_op = mol_collator.per_batch_map
+
+ dataset_column_names = ["smiles", "features", "none"]
+ output_columns = ["f_atoms", "f_bonds", "a2b", "b2a", "b2revb", "a2a", "a_scope", "b_scope", "atom_vocab_label",
+ "bond_vocab_label", "fgroup_label"]
+ dataset = ds.GeneratorDataset(self, column_names=dataset_column_names,
+ shuffle=False, num_shards=self.config.device_num, shard_id=self.config.rank)
+ dataset = dataset.batch(batch_size=self.config.batch_size, num_parallel_workers=min(8, num_parallel_workers))
+ dataset = dataset.map(operations=per_batch_match_op, input_columns=["smiles", "features"],
+ output_columns=output_columns,
+ num_parallel_workers=min(8, num_parallel_workers))
+ dataset = dataset.project(output_columns)
+ return dataset
+
+ def create_grover_dataset(self):
+ """
+ Create dataset for train/eval model.
+ """
+ labels_scaler = self.labels_scaler
+ self.config.num_tasks = self.get_num_tasks()
+ self.config.output_size = self.config.num_tasks
+ self.config.features_dim = self.get_features_dim()
+
+ cores = multiprocessing.cpu_count()
+ num_parallel_workers = int(cores / self.config.device_num)
+
+ mol_collator = MolCollator({}, self.config)
+ per_batch_match_op = mol_collator.per_batch_map
+
+ dataset_column_names = ["smiles", "features", "labels"]
+ output_columns = ["f_atoms", "f_bonds", "a2b", "b2a", "b2revb", "a2a", "a_scope", "b_scope", "smiles"]
+ columns = ["f_atoms", "f_bonds", "a2b", "b2a", "b2revb", "a2a", "a_scope", "b_scope", "smiles", "features",
+ "labels"]
+ dataset = ds.GeneratorDataset(self, column_names=dataset_column_names,
+ shuffle=False, num_shards=self.config.device_num, shard_id=self.config.rank)
+ dataset = dataset.batch(batch_size=self.config.batch_size, num_parallel_workers=min(8, num_parallel_workers))
+ dataset = dataset.map(operations=per_batch_match_op, input_columns=["smiles"], output_columns=output_columns,
+ num_parallel_workers=min(8, num_parallel_workers))
+ dataset = dataset.project(columns)
+ self.config.labels_scaler = labels_scaler
+ return dataset
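
GroverDataSet above hands itself to ds.GeneratorDataset and builds its batches through batch -> map(per_batch_map) -> project. A minimal, self-contained sketch of that MindSpore dataset pattern is shown below; ToySource and per_batch_map are illustrative stand-ins for the GROVER source and collators, not part of this change.

import numpy as np
import mindspore.dataset as ds

class ToySource:
    """Yields (smiles, features, labels) rows as numpy values."""
    def __init__(self, n=8):
        self.n = n

    def __getitem__(self, idx):
        smiles = np.array(idx, np.int32)             # stand-in for an encoded molecule id
        features = np.ones(4, np.float32) * idx      # stand-in for molecule-level features
        labels = np.array([idx % 2], np.float32)
        return smiles, features, labels

    def __len__(self):
        return self.n

def per_batch_map(smiles, features, labels):
    # Per-batch operation: receives one numpy array per input column for the whole batch
    # and returns one array per output column.
    graph = np.stack([features, features], axis=1)   # stand-in for the batched molecular graph
    return graph, smiles, features, labels

dataset = ds.GeneratorDataset(ToySource(), column_names=["smiles", "features", "labels"], shuffle=False)
dataset = dataset.batch(batch_size=4)
dataset = dataset.map(operations=per_batch_map,
                      input_columns=["smiles", "features", "labels"],
                      output_columns=["graph", "smiles", "features", "labels"])
dataset = dataset.project(["graph", "features", "labels"])
for item in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
    print({k: v.shape for k, v in item.items()})
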
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/grover/nn_arch.py b/MindSPONGE/mindsponge/python/pipeline/models/grover/nn_arch.py
index 072ace8a4c0020fb02f60a0917d7a47cc48755cf..d1693f83ff1a8624ff4722e4436687fd964ee7c7 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/grover/nn_arch.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/grover/nn_arch.py
@@ -1,4 +1,4 @@
-# Copyright 2022 Huawei Technologies Co., Ltd
+# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/grover/save_features.py b/MindSPONGE/mindsponge/python/pipeline/models/grover/save_features.py
index c38c7ade8f15eb4e16b03fe36b75c255797025bf..ef36960fac98ca093f3ed2803b213523852ab193 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/grover/save_features.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/grover/save_features.py
@@ -1,111 +1,114 @@
-# Copyright 2022 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""
-Computes and saves molecular features for a dataset.
-"""
-import os
-import shutil
-from multiprocessing import Pool
-from tqdm import tqdm
-from .src.util.utils import makedirs, load_features, save_features, load_smiles_labels
-from .src.data.molfeaturegenerator import get_features_generator
-
-
-class SaveFeatures:
- """SaveFeatures"""
- def __init__(self):
- pass
-
- def load_temp(self, temp_dir):
- """
- Loads all features saved as .npz files in load_dir.
-
- Assumes temporary files are named in order 0.npz, 1.npz, ...
-
- :param temp_dir: Directory in which temporary .npz files containing features are stored.
- :return: A tuple with a list of molecule features, where each molecule's features is a list of floats,
- and the number of temporary files.
- """
- features = []
- temp_num = 0
- temp_path = os.path.join(temp_dir, f'{temp_num}.npz')
-
- while os.path.exists(temp_path):
- features.extend(load_features(temp_path))
- temp_num += 1
- temp_path = os.path.join(temp_dir, f'{temp_num}.npz')
-
- return features, temp_num
-
- def generate_and_save_features(self, data_path, features_generator, save_path, save_frequency, restart, sequential):
- """
- Computes and saves features for a dataset of molecules as a 2D array in a .npz file.
-
- :param args: Arguments.
- """
- # Create directory for save_path
- makedirs(save_path, isfile=True)
-
- # Get data and features function
- mols, _ = load_smiles_labels(data_path)
- features_generator = get_features_generator(features_generator)
- temp_save_dir = save_path + '_temp'
-
- # Load partially complete data
- if restart:
- if os.path.exists(save_path):
- os.remove(save_path)
- if os.path.exists(temp_save_dir):
- shutil.rmtree(temp_save_dir)
- else:
- if os.path.exists(save_path):
- raise ValueError(f'"{save_path}" already exists and args.restart is False.')
-
- if os.path.exists(temp_save_dir):
- features, temp_num = self.load_temp(temp_save_dir)
-
- if not os.path.exists(temp_save_dir):
- makedirs(temp_save_dir)
- features, temp_num = [], 0
-
- # Build features map function
- mols = mols[len(features):] # restrict to data for which features have not been computed yet
-
- if sequential:
- features_map = map(features_generator, mols)
- else:
- features_map = Pool(30).imap(features_generator, mols)
-
- # Get features
- temp_features = []
- for i, feats in tqdm(enumerate(features_map), total=len(mols)):
- temp_features.append(feats)
-
- # Save temporary features every save_frequency
- if (i > 0 and (i + 1) % save_frequency == 0) or i == len(mols) - 1:
- save_features(os.path.join(temp_save_dir, f'{temp_num}.npz'), temp_features)
- features.extend(temp_features)
- temp_features = []
- temp_num += 1
-
- try:
- # Save all features
- save_features(save_path, features)
-
- # Remove temporary features
- shutil.rmtree(temp_save_dir)
- except OverflowError:
- print('Features array is too large to save as a single file.'
- 'Instead keeping features as a directory of files.')
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""
+Computes and saves molecular features for a dataset.
+"""
+import os
+import shutil
+from multiprocessing import Pool
+from tqdm import tqdm
+from .src.util.utils import makedirs, load_features, save_features, load_smiles_labels
+from .src.data.molfeaturegenerator import get_features_generator
+
+
+class SaveFeatures:
+ """SaveFeatures"""
+ def __init__(self):
+ pass
+
+ def load_temp(self, temp_dir):
+ """
+        Loads all features saved as .npz files in temp_dir.
+
+ Assumes temporary files are named in order 0.npz, 1.npz, ...
+
+ :param temp_dir: Directory in which temporary .npz files containing features are stored.
+ :return: A tuple with a list of molecule features, where each molecule's features is a list of floats,
+ and the number of temporary files.
+ """
+ features = []
+ temp_num = 0
+ temp_path = os.path.join(temp_dir, f'{temp_num}.npz')
+
+ while os.path.exists(temp_path):
+ features.extend(load_features(temp_path))
+ temp_num += 1
+ temp_path = os.path.join(temp_dir, f'{temp_num}.npz')
+
+ return features, temp_num
+
+ def generate_and_save_features(self, data_path, features_generator, save_path, save_frequency, restart, sequential):
+ """
+ Computes and saves features for a dataset of molecules as a 2D array in a .npz file.
+
+        :param data_path: Path to the CSV file of molecules.
+        :param features_generator: Name of the features generator to use.
+        :param save_path: Path of the output .npz file.
+        :param save_frequency: Number of molecules processed between temporary saves.
+        :param restart: Whether to discard previously computed features and start over.
+        :param sequential: Whether to compute features sequentially instead of in a process pool.
+        """
+
+ if os.path.exists(save_path):
+ return
+ # Create directory for save_path
+ makedirs(save_path, isfile=True)
+
+ # Get data and features function
+ mols, _ = load_smiles_labels(data_path)
+ features_generator = get_features_generator(features_generator)
+ temp_save_dir = save_path + '_temp'
+
+ # Load partially complete data
+ if restart:
+ if os.path.exists(save_path):
+ os.remove(save_path)
+ if os.path.exists(temp_save_dir):
+ shutil.rmtree(temp_save_dir)
+ else:
+ if os.path.exists(save_path):
+ raise ValueError(f'"{save_path}" already exists and args.restart is False.')
+
+ if os.path.exists(temp_save_dir):
+ features, temp_num = self.load_temp(temp_save_dir)
+
+ if not os.path.exists(temp_save_dir):
+ makedirs(temp_save_dir)
+ features, temp_num = [], 0
+
+ # Build features map function
+ mols = mols[len(features):] # restrict to data for which features have not been computed yet
+
+ if sequential:
+ features_map = map(features_generator, mols)
+ else:
+ features_map = Pool(30).imap(features_generator, mols)
+
+ # Get features
+ temp_features = []
+ for i, feats in tqdm(enumerate(features_map), total=len(mols)):
+ temp_features.append(feats)
+
+ # Save temporary features every save_frequency
+ if (i > 0 and (i + 1) % save_frequency == 0) or i == len(mols) - 1:
+ save_features(os.path.join(temp_save_dir, f'{temp_num}.npz'), temp_features)
+ features.extend(temp_features)
+ temp_features = []
+ temp_num += 1
+
+ try:
+ # Save all features
+ save_features(save_path, features)
+
+ # Remove temporary features
+ shutil.rmtree(temp_save_dir)
+ except OverflowError:
+        print('Features array is too large to save as a single file. '
+ 'Instead keeping features as a directory of files.')
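
The early return added at the top of generate_and_save_features turns the routine into a cache: when the target .npz already exists nothing is recomputed, and otherwise temporary chunks (0.npz, 1.npz, ...) let an interrupted run resume. A minimal sketch of that caching and chunking behaviour with plain NumPy; compute() and the file names are illustrative only.

import os
import shutil
import numpy as np

def compute(x):
    return np.array([x, x * x], dtype=np.float32)          # stand-in for a feature generator

def generate_features(items, save_path, save_frequency=2):
    if os.path.exists(save_path):                           # already cached: skip all work
        return
    temp_dir = save_path + "_temp"
    os.makedirs(temp_dir, exist_ok=True)

    # Resume from any chunks left behind by a previous run.
    features, chunk = [], 0
    while os.path.exists(os.path.join(temp_dir, f"{chunk}.npz")):
        features.extend(np.load(os.path.join(temp_dir, f"{chunk}.npz"))["features"])
        chunk += 1

    remaining = items[len(features):]
    buffer = []
    for i, item in enumerate(remaining):
        buffer.append(compute(item))
        if (i + 1) % save_frequency == 0 or i == len(remaining) - 1:
            np.savez(os.path.join(temp_dir, f"{chunk}.npz"), features=np.stack(buffer))
            features.extend(buffer)
            buffer, chunk = [], chunk + 1

    np.savez(save_path, features=np.stack(features))        # single consolidated file
    shutil.rmtree(temp_dir)

generate_features(list(range(5)), "toy_features.npz")
print(np.load("toy_features.npz")["features"].shape)        # (5, 2)
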
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/grover/split_data.py b/MindSPONGE/mindsponge/python/pipeline/models/grover/split_data.py
index 4ca714e9795ccd54cc49a93158973c6d24a2ef86..1d709a4d80d165c83f510afe701c63765ef24d18 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/grover/split_data.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/grover/split_data.py
@@ -1,66 +1,66 @@
-# Copyright 2022 Huawei Technologies Co., Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ============================================================================
-"""Split dataset"""
-import random
-import csv
-import os
-import stat
-from .src.util.utils import load_smiles
-
-TRAIN_PERCENT = 0.8
-VAL_PERCENT = 0.2
-
-random.seed(3)
-
-
-class SplitData:
- """SplitData"""
- def __init__(self):
- pass
-
- def split_data(self, data_dir, file_name):
- """
- Split data for training and evaluating.
- """
- data_path = os.path.join(data_dir, file_name + ".csv")
- train_data_path = os.path.join(data_dir, file_name + "_train.csv")
- val_data_path = os.path.join(data_dir, file_name + "_val.csv")
-
- smiles = load_smiles(data_path)
- num_smiles = len(smiles)
-
- list_smiles = range(num_smiles)
-
- num_train = int(num_smiles * TRAIN_PERCENT)
- num_val = int(num_smiles * VAL_PERCENT)
-
- train = random.sample(list_smiles, num_train)
- val_test = [i for i in list_smiles if not i in train]
- val = random.sample(val_test, num_val)
- print("train: {}, val: {}".format(len(train), len(val)))
-
- flags = os.O_WRONLY | os.O_CREAT
- modes = stat.S_IWUSR | stat.S_IRUSR
- with os.fdopen(os.open(train_data_path, flags, modes), 'w', newline='') as train_file:
- train_writer = csv.writer(train_file)
- train_writer.writerow(["smiles"])
- for i in train:
- train_writer.writerow(smiles[i])
-
- with os.fdopen(os.open(val_data_path, flags, modes), 'w', newline='') as val_file:
- val_writer = csv.writer(val_file)
- val_writer.writerow(["smiles"])
- for i in val:
- val_writer.writerow(smiles[i])
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Split dataset"""
+import random
+import csv
+import os
+import stat
+from .src.util.utils import load_smiles
+
+TRAIN_PERCENT = 1.0
+VAL_PERCENT = 0.0
+
+random.seed(3)
+
+
+class SplitData:
+ """SplitData"""
+ def __init__(self):
+ pass
+
+ def split_data(self, data_dir, file_name):
+ """
+ Split data for training and evaluating.
+ """
+ data_path = os.path.join(data_dir, file_name + ".csv")
+ train_data_path = os.path.join(data_dir, file_name + "_train.csv")
+ val_data_path = os.path.join(data_dir, file_name + "_val.csv")
+
+ smiles = load_smiles(data_path)
+ num_smiles = len(smiles)
+
+ list_smiles = range(num_smiles)
+
+ num_train = int(num_smiles * TRAIN_PERCENT)
+ num_val = int(num_smiles * VAL_PERCENT)
+
+ train = random.sample(list_smiles, num_train)
+        val_test = [i for i in list_smiles if i not in train]
+ val = random.sample(val_test, num_val)
+ print("train: {}, val: {}".format(len(train), len(val)))
+
+ flags = os.O_WRONLY | os.O_CREAT
+ modes = stat.S_IWUSR | stat.S_IRUSR
+ with os.fdopen(os.open(train_data_path, flags, modes), 'w', newline='') as train_file:
+ train_writer = csv.writer(train_file)
+ train_writer.writerow(["smiles"])
+ for i in train:
+ train_writer.writerow(smiles[i])
+
+ with os.fdopen(os.open(val_data_path, flags, modes), 'w', newline='') as val_file:
+ val_writer = csv.writer(val_file)
+ val_writer.writerow(["smiles"])
+ for i in val:
+ val_writer.writerow(smiles[i])
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/megaevogen/evogen.py b/MindSPONGE/mindsponge/python/pipeline/models/megaevogen/evogen.py
index 8f6a5d3823ef394aa34e15704c33b8b2a245cd9d..f01379e9e83b0f2517c449efda001b1443ca9895 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/megaevogen/evogen.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/megaevogen/evogen.py
@@ -115,7 +115,7 @@ class MEGAEvoGen(Model):
feature["prev_msa_first_row"] = np.zeros((aatype.shape[1], 256)).astype(np.float32)
feature["prev_pair"] = np.zeros((aatype.shape[1], aatype.shape[1], 128)).astype(np.float32)
return feature
- return reconstruct_msa
+ return reconstruct_msa, reconstruct_msa_mask
def loss(self, data):
pass
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/megaevogen/evogen_dataprocess.py b/MindSPONGE/mindsponge/python/pipeline/models/megaevogen/evogen_dataprocess.py
index 6bdb1b1c2f889e7027b52cd969cd1684fc46d075..f8270404165659a6cc37a23565753bc6d72901cc 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/megaevogen/evogen_dataprocess.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/megaevogen/evogen_dataprocess.py
@@ -56,14 +56,6 @@ class MEGAEvoGenDataSet(PSP):
self.in_memory = False
self.phase = None
self.use_pkl = config.use_pkl
- if self.use_pkl:
- self.training_data_src = config.data_path
- self.training_pkl_path = self.training_data_src + "/pkl/"
- self.training_pdb_path = self.training_data_src + "/pdb/"
- self.training_pdb_items = [self.training_pdb_path + key
- for key in sorted(os.listdir(self.training_pdb_path))]
- self.training_pkl_items = [self.training_pkl_path + key
- for key in sorted(os.listdir(self.training_pkl_path))]
self.data_process = [
dict_replace_key(['deletion_matrix_int', 'deletion_matrix']),
dict_expand_dims(keys=["deletion_matrix", "msa"], axis=-1),
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/megafold/megafold.py b/MindSPONGE/mindsponge/python/pipeline/models/megafold/megafold.py
index c8f0e97d5d8f8384797e38a740430aa0a92bf423..bf21aa9623f664f2f286ed0764c700e714672137 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/megafold/megafold.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/megafold/megafold.py
@@ -106,15 +106,12 @@ class MEGAFold(Model):
def forward(self, data):
"forward"
- feat = []
- for key in self.feature_list:
- feat.append(data[key])
if self.use_jit:
prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits \
- = self._jit_forward(feat)
+ = self._jit_forward(data)
else:
prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits \
- = self._pynative_forward(feat)
+ = self._pynative_forward(data)
res = prev_pos, prev_msa_first_row, prev_pair, predicted_lddt_logits
return res
@@ -219,10 +216,16 @@ class MEGAFold(Model):
@jit
- def _jit_forward(self, feat):
+ def _jit_forward(self, data):
+ feat = []
+ for key in self.feature_list:
+ feat.append(data[key])
res = self.network(*feat)
return res
- def _pynative_forward(self, feat):
+ def _pynative_forward(self, data):
+ feat = []
+ for key in self.feature_list:
+ feat.append(data[key])
res = self.network(*feat)
return res
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/megafold/module/loss_module.py b/MindSPONGE/mindsponge/python/pipeline/models/megafold/module/loss_module.py
index 3bd12310cfe9a39c794dfecd8ec272d6fb79f388..38c1664826f8d412ca99edd1e20cd82f5fcd6203 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/megafold/module/loss_module.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/megafold/module/loss_module.py
@@ -97,7 +97,7 @@ class LossNet(nn.Cell):
def softmax_cross_entropy(self, logits, labels):
"""Computes softmax cross entropy given logits and one-hot class labels."""
- loss = -mnp.sum(labels * P.Log()(nn.Softmax()(logits)), axis=-1)
+ loss = -mnp.sum(labels * nn.LogSoftmax()(logits), axis=-1)
return mnp.asarray(loss)
def distogram_loss(self, logits, bin_edges, pseudo_beta, pseudo_beta_mask):
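
The change from P.Log()(nn.Softmax()(logits)) to nn.LogSoftmax()(logits) is a numerical-stability fix: softmax of large logits overflows or underflows before the log is taken, while log-softmax evaluates x - logsumexp(x) directly. A small NumPy illustration with deliberately extreme logits:

import numpy as np

def log_softmax(x, axis=-1):
    x_max = np.max(x, axis=axis, keepdims=True)
    return x - x_max - np.log(np.sum(np.exp(x - x_max), axis=axis, keepdims=True))

logits = np.array([[1000.0, 0.0, -1000.0]])
labels = np.array([[1.0, 0.0, 0.0]])              # one-hot target

naive = -np.sum(labels * np.log(np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)), axis=-1)
stable = -np.sum(labels * log_softmax(logits), axis=-1)
print(naive)    # [nan]  -- exp(1000.) overflows before the log
print(stable)   # [-0.]  -- effectively zero, the expected cross entropy
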
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/nn_arch.py b/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/nn_arch.py
index 65759e7aede22797dfa7efa3c987ed409838616b..324cde159b19241fe6f4f206422b7cac7caf1b33 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/nn_arch.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/nn_arch.py
@@ -21,6 +21,8 @@ import mindspore.ops as ops
from mindspore import Tensor
from mindspore.common.initializer import initializer, XavierUniform
+from .utils import ProcessLinspace
+
def gather_edges(edges, neighbor_idx):
# Features [B,N,N,C] at Neighbor indices [B,N,K] => Neighbor features [B,N,K,C]
@@ -65,9 +67,9 @@ class EncLayer(nn.Cell):
self.num_hidden = num_hidden
self.num_in = num_in
self.scale = scale
- self.dropout1 = nn.Dropout(1 - dropout)
- self.dropout2 = nn.Dropout(1 - dropout)
- self.dropout3 = nn.Dropout(1 - dropout)
+ self.dropout1 = nn.Dropout(p=dropout)
+ self.dropout2 = nn.Dropout(p=dropout)
+ self.dropout3 = nn.Dropout(p=dropout)
self.norm1 = nn.LayerNorm([num_hidden])
self.norm2 = nn.LayerNorm([num_hidden])
self.norm3 = nn.LayerNorm([num_hidden])
@@ -116,8 +118,8 @@ class DecLayer(nn.Cell):
self.num_hidden = num_hidden
self.num_in = num_in
self.scale = scale
- self.dropout1 = nn.Dropout(1 - dropout)
- self.dropout2 = nn.Dropout(1 - dropout)
+ self.dropout1 = nn.Dropout(p=dropout)
+ self.dropout2 = nn.Dropout(p=dropout)
self.norm1 = nn.LayerNorm([num_hidden])
self.norm2 = nn.LayerNorm([num_hidden])
@@ -254,7 +256,7 @@ class ProteinFeatures(nn.Cell):
:]) == 0), ms.int32) # find self vs non-self interaction
e_chains = gather_edges(d_chains[:, :, :, None], e_idx)[:, :, :, 0]
e_positional = self.embeddings(ops.Cast()(offset, ms.int32), e_chains)
- e = ops.Concat(axis=-1)((e_positional, rbf_all))
+ e = ops.Concat(axis=-1)((e_positional, rbf_all.astype(ms.float16)))
e = self.edge_embedding(e)
e = self.norm_edges(e)
return e, e_idx
@@ -280,7 +282,7 @@ class ProteinFeatures(nn.Cell):
def _rbf(self, d):
d_min, d_max, d_count = 2., 22., self.num_rbf
- d_mu = ops.linspace(Tensor(d_min, ms.float32), Tensor(d_max, ms.float32), d_count)
+ d_mu = ProcessLinspace()(Tensor(d_min, ms.float32), Tensor(d_max, ms.float32), d_count)
d_mu = d_mu.view((1, 1, 1, -1))
d_sigma = (d_max - d_min) / d_count
d_expand = ops.expand_dims(d, -1)
@@ -477,22 +479,23 @@ class ProteinMPNN(nn.Cell):
ms.numpy.tile(t[:, None, None], (1, 1, h_v_stack[-1].shape[-1])))[:, 0]
logits = self.w_out(h_v_t) / temperature
probs = ops.Softmax(axis=-1)((logits - constant[None, :] * 1e8 + constant_bias[None, \
- :] / temperature + bias_by_res_gathered / temperature).astype(ms.float32))
+ :] / temperature + bias_by_res_gathered / temperature).astype(ms.float32))
if pssm_bias_flag:
pssm_coef_gathered = ops.GatherD()(pssm_coef, 1, t[:, None])[:, 0]
pssm_bias_gathered = ops.GatherD()(pssm_bias, 1, ms.numpy.tile(t[:, None, None], \
- (1, 1, pssm_bias.shape[-1])))[:, 0]
+ (1, 1, pssm_bias.shape[-1])))[:, 0]
probs = (1 - pssm_multi * pssm_coef_gathered[:, None]) * probs + \
pssm_multi * pssm_coef_gathered[:, None] * pssm_bias_gathered
if pssm_log_odds_flag:
pssm_log_odds_mask_gathered = ops.GatherD()(pssm_log_odds_mask, 1, \
- ms.numpy.tile(t[:, None, None], (1, 1, pssm_log_odds_mask.shape[-1])))[:, 0]
+ ms.numpy.tile(t[:, None, None], \
+ (1, 1, pssm_log_odds_mask.shape[-1])))[:, 0]
probs_masked = probs * pssm_log_odds_mask_gathered
probs_masked += probs * 0.001
probs = probs_masked / ops.ReduceSum(keep_dims=True)(probs_masked, axis=-1)
if omit_aa_mask_flag:
omit_aa_mask_gathered = ops.GatherD()(omit_aa_mask, 1, ms.numpy.tile(t[:, None, None], \
- (1, 1, omit_aa_mask.shape[-1])))[:, 0]
+ (1, 1, omit_aa_mask.shape[-1])))[:, 0]
probs_masked = probs * (1.0 - omit_aa_mask_gathered)
probs = probs_masked / ops.ReduceSum(keep_dims=True)(probs_masked, axis=-1) # [B, 21]
probs_ = np.squeeze(probs.asnumpy(), axis=0).astype("float64")
@@ -597,7 +600,7 @@ class ProteinMPNN(nn.Cell):
else:
bias_by_res_gathered = bias_by_res[:, t, :] # [B, 21]
probs = ops.Softmax(axis=-1)((logits - constant[None, :] * 1e8 + constant_bias[None, \
- :] / temperature + bias_by_res_gathered / temperature).astype(ms.float32))
+ :] / temperature + bias_by_res_gathered / temperature).astype(ms.float32))
if pssm_bias_flag:
pssm_coef_gathered = pssm_coef[:, t]
pssm_bias_gathered = pssm_bias[:, t]
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/proteinmpnn.py b/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/proteinmpnn.py
index 64aec23c17ed999b3ad05e24724a4703aca82173..bde7e914a7f8b5a95aa27c7658dc3a9f8870578a 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/proteinmpnn.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/proteinmpnn.py
@@ -15,13 +15,14 @@
"proteinmpnn"
import mindspore as ms
import mindspore.ops as ops
-from mindspore import jit, context, nn, load_checkpoint
+from mindspore import jit, context, nn
from mindspore import Tensor
from ..model import Model
from .nn_arch import ProteinMPNN
-from .utils import scores_, loss_nll
+from .utils import scores_, loss_nll, ProcessLinspace
from .proteinmpnn_wrapcell import CustomTrainOneStepCell, CustomWithLossCell, LossSmoothed, LRLIST
+from .proteinmpnn_dataset import ProteinMpnnDataset
class ProteinMpnn(Model):
@@ -39,6 +40,10 @@ class ProteinMpnn(Model):
self.config = config
self.use_jit = self.config.use_jit
self.network = ProteinMPNN(self.config)
+ self.white_list = ProcessLinspace
+ self.dataset = ProteinMpnnDataset(self.config)
+ self.checkpoint_url = ""
+ self.checkpoint_path = ""
if self.config.is_training:
loss = LossSmoothed()
net_with_loss = CustomWithLossCell(self.network, loss)
@@ -51,9 +56,7 @@ class ProteinMpnn(Model):
self.checkpoint_url = \
'https://download.mindspore.cn/mindscience/mindsponge/ProteinMPNN/checkpoint/proteinmpnn.ckpt'
self.checkpoint_path = "./proteinmpnn.ckpt"
- param_dict = load_checkpoint(self.checkpoint_path)
- ms.load_param_into_net(self.network, param_dict)
- super().__init__(self.checkpoint_url, self.network, self.name)
+ super().__init__(self.checkpoint_url, self.checkpoint_path, self.network, self.name, self.white_list)
def forward(self, data):
pass
@@ -65,6 +68,7 @@ class ProteinMpnn(Model):
return log_probs
def train_step(self, data):
+ data = self.dataset.process(data)
log_probs = self.backward(data)
loss, _, true_false = loss_nll(data[1], log_probs, data[-1])
train_sum, train_weights = 0., 0.
@@ -134,6 +138,4 @@ class ProteinMpnn(Model):
def _pynative_forward(self, data):
log_probs = self.network(*data)
-
- print(outputs)
return log_probs
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/proteinmpnn_dataset.py b/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/proteinmpnn_dataset.py
index 6d65027c60c074ccd4c2dbc41ca3d9e1f516eae2..03b6fbb48e5b8077b305a04a878c8b120362d2d8 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/proteinmpnn_dataset.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/proteinmpnn_dataset.py
@@ -17,12 +17,14 @@ import os
import pickle
from ...dataset import PSP, data_process_run
+
from .proteinmpnndata import pre_process, tied_featurize, featurize
-from .dataset import StructureDatasetPDB, Definebatch
+from .dataset import StructureDatasetPDB, Definebatch, parse_pdb
class ProteinMpnnDataset(PSP):
"""proteinmpnndataset"""
+
def __init__(self, config):
self.config = config
self.supported_models = ['Proteinmpnn']
@@ -31,9 +33,9 @@ class ProteinMpnnDataset(PSP):
self.is_training = self.config.is_training
self.proteinmpnn_inputs()
if self.is_training:
- self.data_process = [featurize]
+ self.data_process = [featurize()]
else:
- self.data_process = [pre_process, tied_featurize]
+ self.data_process = [pre_process(), tied_featurize()]
super().__init__()
# pylint: disable=arguments-differ
@@ -69,7 +71,13 @@ class ProteinMpnnDataset(PSP):
# pylint: disable=arguments-differ
def process(self, data):
- features = data_process_run(data, self.data_process)
+ pdb_dict_list = parse_pdb(data)
+ all_chain_list = [item[-1:] for item in list(pdb_dict_list[0]) if item[:9] == 'seq_chain']
+ designed_chain_list = all_chain_list
+ fixed_chain_list = [letter for letter in all_chain_list if letter not in designed_chain_list]
+ chain_id_dict = {}
+ chain_id_dict[pdb_dict_list[0]['name']] = (designed_chain_list, fixed_chain_list)
+ features = data_process_run(pdb_dict_list.copy(), self.data_process)
return features
def set_training_data_src(self, data_src):
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/proteinmpnndata.py b/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/proteinmpnndata.py
index 219dbce49ca3ed9e578f2c8a09d0052c107ee62e..e37659de8c9dfe4cf811f971bcfbd1c546d1d935 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/proteinmpnndata.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/proteinmpnndata.py
@@ -13,7 +13,6 @@
# limitations under the License.
# ============================================================================
"proteinmpnndata"
-import json
import random
import numpy as np
@@ -23,7 +22,7 @@ from ...dataset import curry1
@curry1
-def pre_process(feature):
+def pre_process(feature=None):
"pre_process"
alphabet = 'ACDEFGHIKLMNPQRSTVWYX-'
alphabet_set = {a for a in alphabet}
@@ -35,15 +34,14 @@ def pre_process(feature):
data = []
- lines = feature
- for _, line in enumerate(lines):
- entry = json.loads(line)
+ pdb_dict_list = feature
+ for _, entry in enumerate(pdb_dict_list):
seq = entry['seq']
# Check if in alphabet
bad_chars = {s for s in seq}.difference(alphabet_set)
if not bad_chars:
- if len(entry['seq']) <= 100:
+ if len(entry['seq']) <= 1000:
data.append(entry)
else:
discard_count['too_long'] += 1
@@ -53,7 +51,7 @@ def pre_process(feature):
@curry1
-def tied_featurize(batch, chain_dict=None, fixed_position_dict=None, omit_aa_dict=None, tied_positions_dict=None,
+def tied_featurize(batch=None, chain_dict=None, fixed_position_dict=None, omit_aa_dict=None, tied_positions_dict=None,
pssm_dict=None, bias_by_res_dict=None):
""" Pack and pad batch into tensors """
alphabet = 'ACDEFGHIKLMNPQRSTVWYX'
@@ -379,7 +377,7 @@ def batch_(batch, l_max, residue_idx, chain_m, chain_encoding_all, x, s, alphabe
@curry1
-def featurize(batch):
+def featurize(batch=None):
"""featurize"""
alphabet = 'ACDEFGHIKLMNPQRSTVWYX'
b = len(batch)
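
The defaults added to pre_process, tied_featurize and featurize (and the switch from featurize to featurize() in proteinmpnn_dataset.py) follow the @curry1 pattern: the decorated function is called once to bind everything except the first argument, and the returned closure is the actual per-sample processor handed to data_process_run. curry1 itself is imported from ...dataset and not shown in this diff; the sketch below only illustrates the usual shape of such a decorator, with a toy add_offset processor.

def curry1(f):
    """Bind every argument of f except the first, returning a one-argument callable."""
    def bind(*args, **kwargs):
        return lambda first: f(first, *args, **kwargs)
    return bind

@curry1
def add_offset(batch, offset=0):
    return [x + offset for x in batch]

process_list = [add_offset(offset=10)]   # note the call: it returns the real processor
data = [1, 2, 3]
for fn in process_list:
    data = fn(data)
print(data)                              # [11, 12, 13]
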
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/utils.py b/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/utils.py
index 108be7ef8df23f2af17c759cb1b195df38e744f7..026b18542278743fd2ab1c0512d7c52cac72a937 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/utils.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/proteinmpnn/utils.py
@@ -23,6 +23,16 @@ import mindspore.nn as nn
import mindspore.ops as ops
+class ProcessLinspace(nn.Cell):
+ def __init__(self):
+ super(ProcessLinspace, self).__init__()
+ self.linspace = ops.LinSpace()
+
+ def construct(self, d_min, d_max, d_count):
+ output = self.linspace(d_min, d_max, d_count)
+ return output
+
+
def scores_(s, log_probs, mask):
""" Negative log probabilities """
criterion = ops.NLLLoss(reduction='none')
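
ProcessLinspace is a thin nn.Cell wrapper around the ops.LinSpace primitive; nn_arch.py now calls it inside _rbf instead of ops.linspace, and proteinmpnn.py registers the class as white_list when constructing the Model. The snippet below repeats the cell only so it runs stand-alone, and shows the call producing the same distance grid a linspace over the RBF range would.

import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor

class ProcessLinspace(nn.Cell):
    """Cell wrapper around ops.LinSpace, mirroring the class added above."""
    def __init__(self):
        super().__init__()
        self.linspace = ops.LinSpace()

    def construct(self, d_min, d_max, d_count):
        return self.linspace(d_min, d_max, d_count)

d_mu = ProcessLinspace()(Tensor(2.0, ms.float32), Tensor(22.0, ms.float32), 16)
print(d_mu.shape)    # (16,)
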
diff --git a/MindSPONGE/mindsponge/python/pipeline/models/ufold/ufold.py b/MindSPONGE/mindsponge/python/pipeline/models/ufold/ufold.py
index 1144bd902cfdfdae953360fa28b85e74a23fa730..e9f1d3a328bb4270d1b027a726d41fc2cbc64fca 100644
--- a/MindSPONGE/mindsponge/python/pipeline/models/ufold/ufold.py
+++ b/MindSPONGE/mindsponge/python/pipeline/models/ufold/ufold.py
@@ -140,6 +140,7 @@ class UFold(Model):
pred_contacts = self.network(seq_embedding_batch)
contact_masks = ops.ZerosLike()(pred_contacts)
contact_masks[:, :seq_lens.item(0), :seq_lens.item(0)] = 1
+ contact_masks = contact_masks.astype(ms.float32)
feat = [seq_embedding_batch, contact_masks, contacts_batch]
feat = mutable(feat)
loss = self.backward(feat)
diff --git a/MindSPONGE/mindsponge/python/pipeline/pipeline.py b/MindSPONGE/mindsponge/python/pipeline/pipeline.py
index 9e7c75b96345a2621591cdd04d4dff78666295da..fdc64c10f1ca4b8a99734813efd0f0229bfc6f80 100644
--- a/MindSPONGE/mindsponge/python/pipeline/pipeline.py
+++ b/MindSPONGE/mindsponge/python/pipeline/pipeline.py
@@ -24,6 +24,8 @@ from .models import DeepDR, DeepDRDataSet, deepdr_configuration
from .models import DeepFri, DeepFriDataSet, deepfri_configuration
from .models import ESM, ESMDataSet, esm_configuration
from .models import ESM2, ESM2DataSet, esm2_configuration
+from .models import GraphDTA, GraphDTADataSet, graphdta_configuration
+from .models import Grover, GroverDataSet, grover_configuration
from .models import KGNN, KGNNDataSet, kgnn_configuration
from .models import MEGAAssessment, MEGAAssessmentDataSet, megaassessment_configuration
from .models import MEGAEvoGen, MEGAEvoGenDataSet, megaevogen_configuration
@@ -36,8 +38,10 @@ model_card = {
"ColabDesign": {"model": COLABDESIGN, "dataset": ColabDesignDataSet, "config": colabdesign_configuration},
"DeepDR": {"model": DeepDR, "dataset": DeepDRDataSet, "config": deepdr_configuration},
"DeepFri": {"model": DeepFri, "dataset": DeepFriDataSet, "config": deepfri_configuration},
- "ESM": {"model": ESM, "dataset": ESMDataSet, "config": esm_configuration},
+ "ESM_IF1": {"model": ESM, "dataset": ESMDataSet, "config": esm_configuration},
"ESM2": {"model": ESM2, "dataset": ESM2DataSet, "config": esm2_configuration},
+ "GraphDTA": {"model": GraphDTA, "dataset": GraphDTADataSet, "config": graphdta_configuration},
+ "Grover": {"model": Grover, "dataset": GroverDataSet, "config": grover_configuration},
"KGNN": {"model": KGNN, "dataset": KGNNDataSet, "config": kgnn_configuration},
"MEGAAssessment": {"model": MEGAAssessment, "dataset": MEGAAssessmentDataSet,
"config": megaassessment_configuration},
diff --git a/MindSPONGE/mindsponge/python/potential/bias/bias.py b/MindSPONGE/mindsponge/python/potential/bias/bias.py
index 60ae3297302d4bd9a24a544bc2989b01fb4aebe9..60eefb8556e1cadc9e5937fba3fa00ad25dfedec 100644
--- a/MindSPONGE/mindsponge/python/potential/bias/bias.py
+++ b/MindSPONGE/mindsponge/python/potential/bias/bias.py
@@ -84,7 +84,7 @@ class Bias(EnergyCell):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -97,8 +97,8 @@ class Bias(EnergyCell):
Index of neighbour atoms. Default: None
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour atoms. Default: None
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms.
+            neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+                Vectors from central atom to neighbouring atoms.
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms. Default: None
pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
diff --git a/MindSPONGE/mindsponge/python/potential/bias/harmonic.py b/MindSPONGE/mindsponge/python/potential/bias/harmonic.py
index 810ca53b58acf16b933398bc488c4506545ff5c7..769849d6bdb418daa87bc521356725a3af148492 100644
--- a/MindSPONGE/mindsponge/python/potential/bias/harmonic.py
+++ b/MindSPONGE/mindsponge/python/potential/bias/harmonic.py
@@ -113,7 +113,7 @@ class HarmonicOscillator(Bias):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -126,8 +126,8 @@ class HarmonicOscillator(Bias):
Index of neighbour atoms. Default: None
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour atoms. Default: None
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms.
+            neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+                Vectors from central atom to neighbouring atoms.
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms. Default: None
pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
diff --git a/MindSPONGE/mindsponge/python/potential/bias/oscillator.py b/MindSPONGE/mindsponge/python/potential/bias/oscillator.py
index 52ba72848474b90fa944db708486275bc078d55b..a2cd1e41b4c3cc4ce474cc80792a65299ffb637f 100644
--- a/MindSPONGE/mindsponge/python/potential/bias/oscillator.py
+++ b/MindSPONGE/mindsponge/python/potential/bias/oscillator.py
@@ -57,12 +57,14 @@ class OscillatorBias(Bias):
self.old_crd = Tensor(old_crd, ms.float32)
self.k = Tensor(k, ms.float32)
self.nonh_mask = Tensor(1 - nonh_mask, ms.int32)
+ if self.nonh_mask.ndim == 1:
+ self.nonh_mask = self.nonh_mask[None, :, None]
def construct(self,
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
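
The new ndim check reshapes a flat per-atom mask of shape (A,) to (1, A, 1) so it broadcasts against coordinates of shape (B, A, D). A quick NumPy illustration with made-up shapes:

import numpy as np

nonh_mask = np.array([1, 0, 1, 1])         # (A,)
coordinate = np.ones((2, 4, 3))            # (B, A, D)
mask_b = nonh_mask[None, :, None]          # (1, A, 1)
print((coordinate * mask_b).shape)         # (2, 4, 3) -- masked coordinates, one scale per atom
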
diff --git a/MindSPONGE/mindsponge/python/potential/bias/spherical.py b/MindSPONGE/mindsponge/python/potential/bias/spherical.py
index c40ff8244318dc898eaf0b6a51065e87c607fcfc..73c30bca28f7c92eca3dec0688e95c110196bb03 100644
--- a/MindSPONGE/mindsponge/python/potential/bias/spherical.py
+++ b/MindSPONGE/mindsponge/python/potential/bias/spherical.py
@@ -22,9 +22,11 @@
# ============================================================================
"""Base cell for bais potential"""
+from inspect import signature
+
import mindspore as ms
from mindspore import Tensor
-from mindspore import nn
+from mindspore import nn, ops
from mindspore.ops import functional as F
from .bias import Bias
@@ -95,13 +97,16 @@ class SphericalRestrict(Bias):
depth = depth(self.units)
self.depth = Tensor(depth, ms.float32)
- self.norm_last_dim = nn.Norm(-1, False)
+ self.norm_last_dim = None
+ # MindSpore < 2.0.0-rc1
+ if 'ord' not in signature(ops.norm).parameters.keys():
+ self.norm_last_dim = nn.Norm(-1, False)
def construct(self,
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -114,8 +119,8 @@ class SphericalRestrict(Bias):
Index of neighbour atoms. Default: None
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour atoms. Default: None
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms.
+            neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+                Vectors from central atom to neighbouring atoms.
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms. Default: None
pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
@@ -132,8 +137,13 @@ class SphericalRestrict(Bias):
"""
+ # (B, A, D) - (D)
+ vector = coordinate - self.center
# (B, A) <- (B, A, D)
- distance = self.norm_last_dim(coordinate - self.center)
+ if self.norm_last_dim is None:
+ distance = ops.norm(vector, None, -1)
+ else:
+ distance = self.norm_last_dim(vector)
diff = distance - self.radius
bias = self.force_constant * F.log1p(F.exp(diff/self.depth))
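
The inspect.signature probe keeps SphericalRestrict working across MindSpore versions: when ops.norm has no 'ord' parameter (MindSpore < 2.0.0-rc1) the old nn.Norm cell is retained, otherwise the functional ops.norm(vector, None, -1) is used. The same probing idea is sketched below with toy stand-ins for the two API generations; only the signature check mirrors the real code.

from inspect import signature

def new_style_norm(x, ord=None, dim=None):   # stand-in for the MindSpore >= 2.0 ops.norm signature
    return ("new", ord, dim)

def old_style_norm(x):                       # stand-in for the pre-2.0 nn.Norm call
    return ("old",)

api = new_style_norm
if 'ord' in signature(api).parameters:       # same probe as used in __init__ above
    result = api([3.0, 4.0], ord=None, dim=-1)
else:
    result = old_style_norm([3.0, 4.0])
print(result)                                # ('new', None, -1)
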
diff --git a/MindSPONGE/mindsponge/python/potential/bias/wall.py b/MindSPONGE/mindsponge/python/potential/bias/wall.py
index 64fddb2f6ea1e3008f08f88755765d99fdbbe33b..2c9a7dc2f156a5b11649543c377d883bb985f96d 100644
--- a/MindSPONGE/mindsponge/python/potential/bias/wall.py
+++ b/MindSPONGE/mindsponge/python/potential/bias/wall.py
@@ -111,7 +111,7 @@ class WallBias(Bias):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -124,8 +124,8 @@ class WallBias(Bias):
Index of neighbour atoms. Default: None
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour atoms. Default: None
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms.
+            neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+                Vectors from central atom to neighbouring atoms.
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms. Default: None
pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
diff --git a/MindSPONGE/mindsponge/python/potential/energy/angle.py b/MindSPONGE/mindsponge/python/potential/energy/angle.py
index e1165e7e6ccdd57bc318884efd57a6b8e146a145..07c0c47e082bd71a04651e1cbd9fb49b91645f7e 100644
--- a/MindSPONGE/mindsponge/python/potential/energy/angle.py
+++ b/MindSPONGE/mindsponge/python/potential/energy/angle.py
@@ -33,7 +33,7 @@ from mindspore.ops import functional as F
from .energy import EnergyCell
from ...colvar import Angle
from ...function import functions as func
-from ...function import get_ms_array
+from ...function import get_ms_array, get_arguments
class AngleEnergy(EnergyCell):
@@ -94,6 +94,7 @@ class AngleEnergy(EnergyCell):
length_unit: str = 'nm',
energy_unit: str = 'kj/mol',
name: str = 'angle',
+ **kwargs,
):
super().__init__(
@@ -102,6 +103,7 @@ class AngleEnergy(EnergyCell):
length_unit=length_unit,
energy_unit=energy_unit,
)
+ self._kwargs = get_arguments(locals(), kwargs)
if parameters is not None:
length_unit = parameters.get('length_unit')
@@ -157,7 +159,7 @@ class AngleEnergy(EnergyCell):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -170,8 +172,8 @@ class AngleEnergy(EnergyCell):
Index of neighbour atoms.
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour index.
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms.
+            neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+                Vectors from central atom to neighbouring atoms.
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms.
inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float.
diff --git a/MindSPONGE/mindsponge/python/potential/energy/bond.py b/MindSPONGE/mindsponge/python/potential/energy/bond.py
index ea957ee735f6a26d41cc5266e7b4db3a059b5c6e..77f45ac33ec11b43c59de5f18fcca51124fd8416 100644
--- a/MindSPONGE/mindsponge/python/potential/energy/bond.py
+++ b/MindSPONGE/mindsponge/python/potential/energy/bond.py
@@ -33,7 +33,7 @@ from mindspore.ops import functional as F
from .energy import EnergyCell
from ...colvar import Distance
from ...function import functions as func
-from ...function import get_ms_array
+from ...function import get_ms_array, get_arguments
class BondEnergy(EnergyCell):
@@ -94,6 +94,7 @@ class BondEnergy(EnergyCell):
length_unit: str = 'nm',
energy_unit: str = 'kj/mol',
name: str = 'bond',
+ **kwargs,
):
super().__init__(
@@ -102,6 +103,7 @@ class BondEnergy(EnergyCell):
length_unit=length_unit,
energy_unit=energy_unit,
)
+ self._kwargs = get_arguments(locals(), kwargs)
if parameters is not None:
length_unit = parameters.get('length_unit')
@@ -158,7 +160,7 @@ class BondEnergy(EnergyCell):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -171,8 +173,8 @@ class BondEnergy(EnergyCell):
Index of neighbour atoms.
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour index.
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms.
+            neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+                Vectors from central atom to neighbouring atoms.
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms.
inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float.
diff --git a/MindSPONGE/mindsponge/python/potential/energy/coulomb.py b/MindSPONGE/mindsponge/python/potential/energy/coulomb.py
index e7f38c0025ef1f2931609efcbd8976f871be0dd2..d48ab87da5e90425e3007580c8e7657855be02df 100644
--- a/MindSPONGE/mindsponge/python/potential/energy/coulomb.py
+++ b/MindSPONGE/mindsponge/python/potential/energy/coulomb.py
@@ -37,7 +37,7 @@ from mindspore.ops import functional as F
from ...colvar import Distance
from .energy import NonbondEnergy
from ...function import functions as func
-from ...function import gather_value, get_ms_array
+from ...function import gather_value, get_ms_array, get_arguments
from ...function.units import Units, GLOBAL_UNITS, Length
from ...system.molecule import Molecule
@@ -102,6 +102,7 @@ class CoulombEnergy(NonbondEnergy):
length_unit: str = 'nm',
energy_unit: str = 'kj/mol',
name: str = 'coulomb',
+ **kwargs,
):
super().__init__(
@@ -111,6 +112,7 @@ class CoulombEnergy(NonbondEnergy):
length_unit=length_unit,
energy_unit=energy_unit,
)
+ self._kwargs = get_arguments(locals(), kwargs)
if parameters is not None:
length_unit = parameters.get('length_unit')
@@ -192,7 +194,7 @@ class CoulombEnergy(NonbondEnergy):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -205,8 +207,8 @@ class CoulombEnergy(NonbondEnergy):
Index of neighbour atoms.
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour index.
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms.
+            neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+                Vectors from central atom to neighbouring atoms.
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms.
pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
@@ -323,30 +325,6 @@ class DampedShiftedForceCoulomb(Cell):
):
r"""Calculate energy term.
- Args:
- coordinate (Tensor): Tensor of shape (B, A, D). Data type is float.
- Position coordinate of atoms in system
- neighbour_index (Tensor): Tensor of shape (B, A, N). Data type is int.
- Index of neighbour atoms.
- neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Mask for neighbour index.
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms.
- neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
- Distance between neighbours atoms.
- inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float.
- Reciprocal of distances.
- pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
- Tensor of PBC box. Default: None
-
- Returns:
- energy (Tensor): Tensor of shape (B, 1). Data type is float.
-
- Symbols:
- B: Batchsize, i.e. number of walkers in simulation
- A: Number of atoms.
- D: Spatial dimension of the simulation system. Usually is 3.
-
"""
# (B,A,N) = (B,A,1) * (B,A,N)
diff --git a/MindSPONGE/mindsponge/python/potential/energy/dihedral.py b/MindSPONGE/mindsponge/python/potential/energy/dihedral.py
index 777b20e0f3e7f3ccde0ce6a96f86ad168f760c76..35cb254d90d80711bab8119ce91b2c66559a55fa 100644
--- a/MindSPONGE/mindsponge/python/potential/energy/dihedral.py
+++ b/MindSPONGE/mindsponge/python/potential/energy/dihedral.py
@@ -33,7 +33,7 @@ from mindspore import Parameter
from .energy import EnergyCell
from ...colvar import Torsion
from ...function import functions as func
-from ...function import get_ms_array
+from ...function import get_ms_array, get_arguments
class DihedralEnergy(EnergyCell):
@@ -98,6 +98,7 @@ class DihedralEnergy(EnergyCell):
length_unit: str = 'nm',
energy_unit: str = 'kj/mol',
name: str = 'dihedral',
+ **kwargs,
):
super().__init__(
@@ -106,6 +107,7 @@ class DihedralEnergy(EnergyCell):
length_unit=length_unit,
energy_unit=energy_unit,
)
+ self._kwargs = get_arguments(locals(), kwargs)
if parameters is not None:
energy_unit = parameters.get('energy_unit')
@@ -171,7 +173,7 @@ class DihedralEnergy(EnergyCell):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -184,8 +186,8 @@ class DihedralEnergy(EnergyCell):
Index of neighbour atoms.
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour index.
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms.
+            neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+                Vectors from central atom to neighbouring atoms.
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms.
inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float.
diff --git a/MindSPONGE/mindsponge/python/potential/energy/energy.py b/MindSPONGE/mindsponge/python/potential/energy/energy.py
index e8e969b1e1551353372e82a657c67fef82fd8ca8..fc954778218cb771da08487a2ac9d7de54264377 100644
--- a/MindSPONGE/mindsponge/python/potential/energy/energy.py
+++ b/MindSPONGE/mindsponge/python/potential/energy/energy.py
@@ -33,45 +33,38 @@ from ...function.units import Units, Length, GLOBAL_UNITS
class EnergyCell(Cell):
- r"""Base class for energy terms.
-
- `EnergyCell` is usually used as a base class for individual energy terms in a classical force field.
- As the force field parameters usually has units, the units of the EnergyCell as an energy term
- should be the same as the units of the force field parameters, and not equal to the global units.
+ r"""
+ Base class for energy terms.
+ `EnergyCell` is usually used as a base class for individual energy terms in a classical force field.
+    As the force field parameters usually have units, the units of the `EnergyCell` as an energy term
+ should be the same as the units of the force field parameters, and not equal to the global units.
Args:
-
name (str): Name of energy. Default: 'energy'
-
length_unit (str): Length unit. If None is given, it will be assigned with the global length unit.
Default: 'nm'
-
energy_unit (str): Energy unit. If None is given, it will be assigned with the global energy unit.
Default: 'kj/mol'
-
- use_pbc (bool): Whether to use periodic boundary condition.
+ use_pbc (bool): Whether to use periodic boundary condition. Default: None
Returns:
-
- energy (Tensor): Tensor of shape `(B, 1)`. Data type is float.
+        Tensor of energy with shape `(B, 1)`. Data type is float.
Supported Platforms:
-
``Ascend`` ``GPU``
Symbols:
-
B: Batchsize, i.e. number of walkers in simulation
-
"""
def __init__(self,
name: str = 'energy',
length_unit: str = 'nm',
energy_unit: str = 'kj/mol',
use_pbc: bool = None,
+ **kwargs
):
-
super().__init__()
+ self._kwargs = kwargs
self._name = name
@@ -89,26 +82,51 @@ class EnergyCell(Cell):
@property
def name(self) -> str:
- """name of energy"""
+ """
+ Name of energy.
+
+ Returns:
+ str, name of energy.
+ """
return self._name
@property
def use_pbc(self) -> bool:
- """whether to use periodic boundary condition"""
+ """
+ Whether to use periodic boundary condition.
+
+ Returns:
+            bool, whether to use periodic boundary condition.
+ """
return self._use_pbc
@property
def length_unit(self) -> str:
- """length unit"""
+ """
+ Length unit.
+
+ Returns:
+ str, length unit.
+ """
return self.units.length_unit
@property
def energy_unit(self) -> str:
- """energy unit"""
+ """
+ Energy unit.
+
+ Returns:
+ str, energy unit.
+ """
return self.units.energy_unit
def set_input_unit(self, length_unit: Union[str, Units, Length]):
- """set the length unit for the input coordinates"""
+ """
+ Set the length unit for the input coordinates.
+
+ Args:
+ length_unit(Union[str, Units, Length]): The length unit for the input coordinates.
+ """
if length_unit is None:
self.input_unit_scale = 1
elif isinstance(length_unit, (str, Units, float)):
@@ -120,7 +138,13 @@ class EnergyCell(Cell):
return self
def set_cutoff(self, cutoff: float, unit: str = None):
- """set cutoff distances"""
+ """
+ Set cutoff distances.
+
+ Args:
+ cutoff(float): Cutoff distances.
+ unit(str): Length unit. Default: None
+ """
if cutoff is None:
self.cutoff = None
else:
@@ -129,23 +153,44 @@ class EnergyCell(Cell):
return self
def set_pbc(self, use_pbc: bool):
- """set whether to use periodic boundary condition."""
+ """
+ Set whether to use periodic boundary condition.
+
+ Args:
+ use_pbc(bool): Whether to use periodic boundary condition.
+ """
self._use_pbc = use_pbc
return self
def convert_energy_from(self, unit: str) -> float:
- """convert energy from outside unit to inside unit"""
+ """
+ Convert energy from outside unit to inside unit.
+
+ Args:
+ unit(str): Energy unit.
+
+ Returns:
+            float, the conversion factor from the specified energy unit to the internal unit.
+ """
return self.units.convert_energy_from(unit)
def convert_energy_to(self, unit: str) -> float:
- """convert energy from inside unit to outside unit"""
+ """
+ Convert energy from inside unit to outside unit.
+
+ Args:
+ unit(str): Energy unit.
+
+ Returns:
+            float, the conversion factor from the internal unit to the specified energy unit.
+ """
return self.units.convert_energy_to(unit)
def construct(self,
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -158,12 +203,10 @@ class EnergyCell(Cell):
Index of neighbour atoms. Default: None
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour index. Default: None
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms. Default: None
+            neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+                Vectors from central atom to neighbouring atoms. Default: None
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms. Default: None
- inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float.
- Reciprocal of distances. Default: None
pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
Tensor of PBC box. Default: None
@@ -223,7 +266,7 @@ class NonbondEnergy(EnergyCell):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -236,8 +279,8 @@ class NonbondEnergy(EnergyCell):
Index of neighbour atoms. Default: None
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour index. Default: None
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms. Default: None
+            neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+                Vectors from central atom to neighbouring atoms. Default: None
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms. Default: None
inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float.
diff --git a/MindSPONGE/mindsponge/python/potential/energy/lj.py b/MindSPONGE/mindsponge/python/potential/energy/lj.py
index 72ff000d88c33c9635078cf713997c9113014759..57b228de3f42c493f3309844f2df28f4d6986590 100644
--- a/MindSPONGE/mindsponge/python/potential/energy/lj.py
+++ b/MindSPONGE/mindsponge/python/potential/energy/lj.py
@@ -32,7 +32,7 @@ from mindspore.ops import functional as F
from .energy import NonbondEnergy
from ... import function as func
-from ...function.functions import gather_value, get_ms_array
+from ...function.functions import gather_value, get_ms_array, get_arguments
class LennardJonesEnergy(NonbondEnergy):
@@ -106,6 +106,7 @@ class LennardJonesEnergy(NonbondEnergy):
length_unit: str = 'nm',
energy_unit: str = 'kj/mol',
name: str = 'vdw',
+ **kwargs,
):
super().__init__(
@@ -115,6 +116,7 @@ class LennardJonesEnergy(NonbondEnergy):
length_unit=length_unit,
energy_unit=energy_unit,
)
+ self._kwargs = get_arguments(locals(), kwargs)
if parameters is not None:
length_unit = parameters.get('length_unit')
@@ -175,7 +177,7 @@ class LennardJonesEnergy(NonbondEnergy):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -188,8 +190,8 @@ class LennardJonesEnergy(NonbondEnergy):
Index of neighbour atoms.
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour index.
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms.
+ neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+ Vectors from central atom to neighbouring atoms.
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms.
inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float.
diff --git a/MindSPONGE/mindsponge/python/potential/energy/pairs.py b/MindSPONGE/mindsponge/python/potential/energy/pairs.py
index 4a6aa1eeae4719aaeec0c7ed13b5d576fd0500ef..9cc81fdcf0c8de80b326aeadb29537a577bc21f9 100644
--- a/MindSPONGE/mindsponge/python/potential/energy/pairs.py
+++ b/MindSPONGE/mindsponge/python/potential/energy/pairs.py
@@ -34,7 +34,7 @@ from mindspore.ops import functional as F
from .energy import EnergyCell
from ...colvar import Distance
-from ...function.functions import get_integer, get_ms_array, keepdims_sum
+from ...function.functions import get_integer, get_ms_array, get_arguments, keepdims_sum
class NonbondPairwiseEnergy(EnergyCell):
@@ -117,6 +117,7 @@ class NonbondPairwiseEnergy(EnergyCell):
length_unit: str = 'nm',
energy_unit: str = 'kj/mol',
name: str = 'nb_pairs',
+ **kwargs
):
super().__init__(
@@ -125,6 +126,7 @@ class NonbondPairwiseEnergy(EnergyCell):
length_unit=length_unit,
energy_unit=energy_unit,
)
+ self._kwargs = get_arguments(locals(), kwargs)
if parameters is not None:
length_unit = parameters.get('length_unit')
@@ -238,7 +240,7 @@ class NonbondPairwiseEnergy(EnergyCell):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -251,8 +253,8 @@ class NonbondPairwiseEnergy(EnergyCell):
Index of neighbour atoms.
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour index.
- neighbour_coord (Tensor): Tensor of shape (B, A, N). Data type is bool.
- Position coorindates of neighbour atoms.
+ neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+ Vectors from central atom to neighbouring atoms.
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms.
inv_neigh_dis (Tensor): Tensor of shape (B, A, N). Data type is float.
diff --git a/MindSPONGE/mindsponge/python/potential/force.py b/MindSPONGE/mindsponge/python/potential/force.py
index bed5aa354953d494b10763345a2d759e291e7bbb..126f90154666c8e2922884912755f80436d5fbee 100644
--- a/MindSPONGE/mindsponge/python/potential/force.py
+++ b/MindSPONGE/mindsponge/python/potential/force.py
@@ -161,7 +161,7 @@ class ForceCell(Cell):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -174,8 +174,8 @@ class ForceCell(Cell):
Index of neighbour atoms. Default: None
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour atoms. Default: None
- neighbour_coord (Tensor): Tensor of shape (B, A, N, D). Data type is bool.
- Position coorindates of neighbour atoms.
+ neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+ Vectors from central atom to neighbouring atoms.
neighbour_distances (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms. Default: None
pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
diff --git a/MindSPONGE/mindsponge/python/potential/forcefield.py b/MindSPONGE/mindsponge/python/potential/forcefield.py
index 1328b23812f223e100d4677d4ad74dc70cf1c785..0568cb787766375aefffda81f8a46b0549386803 100644
--- a/MindSPONGE/mindsponge/python/potential/forcefield.py
+++ b/MindSPONGE/mindsponge/python/potential/forcefield.py
@@ -38,7 +38,8 @@ from .potential import PotentialCell
from ..data.parameters import ForceFieldParameters
from ..data.forcefield import get_forcefield
from ..system import Molecule
-from ..function.units import Units, Length
+from ..function import get_arguments
+from ..function import Units, Length
THIS_PATH = os.path.abspath(__file__)
@@ -94,6 +95,7 @@ class ForceFieldBase(PotentialCell):
energy_unit: str = None,
use_pbc: bool = None,
name: str = 'potential',
+ **kwargs,
):
super().__init__(
@@ -102,6 +104,7 @@ class ForceFieldBase(PotentialCell):
use_pbc=use_pbc,
name=name,
)
+ self._kwargs = get_arguments(locals(), kwargs)
if isinstance(cutoff, Length):
cutoff = cutoff(self.units)
@@ -113,7 +116,7 @@ class ForceFieldBase(PotentialCell):
self._num_energies = 0
self._energy_index = {}
- self.energies = None
+ self.energies: List[EnergyCell] = None
self.output_unit_scale = 1
self.set_energies(energy)
@@ -217,7 +220,7 @@ class ForceFieldBase(PotentialCell):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -230,8 +233,8 @@ class ForceFieldBase(PotentialCell):
Index of neighbour atoms. Default: None
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour atoms. Default: None
- neighbour_coord (Tensor): Tensor of shape (B, A, N, D). Data type is bool.
- Position coorindates of neighbour atoms.
+ neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is float.
+ Vectors from central atom to neighbouring atoms.
neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms. Default: None
pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
@@ -255,7 +258,7 @@ class ForceFieldBase(PotentialCell):
coordinate=coordinate,
neighbour_index=neighbour_index,
neighbour_mask=neighbour_mask,
- neighbour_coord=neighbour_coord,
+ neighbour_vector=neighbour_vector,
neighbour_distance=neighbour_distance,
pbc_box=pbc_box
)
@@ -321,6 +324,7 @@ class ForceField(ForceFieldBase):
length_unit: str = None,
energy_unit: str = None,
name: str = 'potential',
+ **kwargs,
):
super().__init__(
@@ -330,6 +334,7 @@ class ForceField(ForceFieldBase):
energy_unit=energy_unit,
name=name,
)
+ self._kwargs = get_arguments(locals(), kwargs)
use_pbc = system.use_pbc
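As a usage sketch of the updated interface, a ForceField is still built directly from a system, with extra keyword arguments now captured via get_arguments. The top-level imports and the force-field name below are assumptions based on common MindSPONGE usage; the PDB file is the alad.pdb added later in this patch.

from mindsponge import Protein, ForceField  # assumed top-level exports

system = Protein(pdb='alad.pdb')                           # alanine dipeptide added in this patch
potential = ForceField(system, parameters='AMBER.FF14SB')  # assumed built-in parameter set name
print(potential.energy_names)                              # names of the individual energy terms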
diff --git a/MindSPONGE/mindsponge/python/potential/potential.py b/MindSPONGE/mindsponge/python/potential/potential.py
index 9dcf6bce6660d709a9668eb02f5823c5d3acae30..477e6d6bc16d165b4c551beac1e495decc607328 100644
--- a/MindSPONGE/mindsponge/python/potential/potential.py
+++ b/MindSPONGE/mindsponge/python/potential/potential.py
@@ -33,38 +33,45 @@ from ..function.operations import GetDistance, GetVector
class PotentialCell(EnergyCell):
- r"""Base class for potential energy.
-
- The `PotentialCell` is a special subclass of `EnergyCell`. The main difference with `EnergyCell` is
- that normally `EnergyCell` only outputs one energy term, so that `EnergyCell` returns a Tensor of
- the shape `(B, 1)`. And a `PotentialCell` can output multiple energy items, so it returns a Tensor
- of the shape `(B, E)`. Besides, by default the units of `PotentialCell` are equal to the global units.
+ r"""
+ Base class for potential energy.
+ The `PotentialCell` is a special subclass of `EnergyCell`. The main difference with `EnergyCell` is
+ that normally `EnergyCell` only outputs one energy term, so that `EnergyCell` returns a Tensor of
+ the shape `(B, 1)`. And a `PotentialCell` can output multiple energy items, so it returns a Tensor
+ of the shape `(B, E)`. Besides, by default the units of `PotentialCell` are equal to the global units.
Args:
-
- num_energies (int): Number of the outputs of energy terms. Default: 1
-
- length_unit (str): Length unit. If None is given, it will be assigned with the global length unit.
- Default: None
-
- energy_unit (str): Energy unit. If None is given, it will be assigned with the global energy unit.
- Default: None
-
- use_pbc (bool): Whether to use periodic boundary condition.
-
- Returns:
-
- energy (Tensor): Tensor of shape `(B, E)`. Data type is float.
+ num_energies(int): Number of the outputs of energy terms. Default: 1
+ energy_names(Union[str, List[str]]): Names of energy terms. Default: "potential".
+ length_unit(str): Length unit. If None is given, it will be assigned
+ with the global length unit. Default: None
+ energy_unit(str): Energy unit. If None is given, it will be assigned
+ with the global energy unit. Default: None
+ use_pbc(bool): Whether to use periodic boundary condition.
+ name(str): Name of energy. Default: "potential"
+
+ Inputs:
+ - **coordinate** (Tensor) - Tensor of shape (B, A, D). Data type is float.
+ Position coordinate of atoms in system.
+ - **neighbour_index** (Tensor) - Tensor of shape (B, A, N). Data type is int.
+ Index of neighbour atoms. Default: None
+ - **neighbour_mask** (Tensor) - Tensor of shape (B, A, N). Data type is bool.
+ Mask for neighbour atoms. Default: None
+ - **neighbour_vector** (Tensor) - Tensor of shape (B, A, N, D). Data type is float.
+ Vectors from central atom to neighbouring atoms. Default: None
+ - **neighbour_distance** (Tensor) - Tensor of shape (B, A, N). Data type is float.
+ Distances between neighbouring atoms. Default: None
+ - **pbc_box** (Tensor) - Tensor of shape (B, D). Data type is float. Tensor of PBC box. Default: None
+
+ Outputs:
+ potential, Tensor of shape `(B, E)`. Data type is float.
Supported Platforms:
-
``Ascend`` ``GPU``
Symbols:
-
B: Batchsize, i.e. number of walkers in simulation.
E: Number of energy terms.
-
"""
def __init__(self,
@@ -74,6 +81,7 @@ class PotentialCell(EnergyCell):
energy_unit: str = None,
use_pbc: bool = None,
name: str = 'potential',
+ **kwargs
):
super().__init__(
@@ -82,6 +90,7 @@ class PotentialCell(EnergyCell):
energy_unit=energy_unit,
use_pbc=use_pbc,
)
+ self._kwargs = kwargs
self._num_energies = get_integer(num_energies)
self._energy_names = []
@@ -104,28 +113,56 @@ class PotentialCell(EnergyCell):
@property
def exclude_index(self) -> Tensor:
- """exclude index"""
+ """
+ Exclude index.
+
+ Return:
+ Tensor, exclude index.
+ """
if self._exclude_index is None:
return None
return self.identity(self._exclude_index)
@property
def num_energies(self) -> int:
- """number of energy components"""
+ """
+ Number of energy components.
+
+ Return:
+ int, number of energy components.
+ """
return self._num_energies
@property
def energy_names(self) -> List[str]:
- """List of strings of energy names"""
+ """
+ List of strings of energy names.
+
+ Return:
+ List[str], strings of energy names.
+ """
return self._energy_names
def set_exclude_index(self, exclude_index: Tensor) -> Tensor:
- """set excluded index"""
+ """
+ Set excluded index.
+
+ Args:
+ exclude_index(Tensor): Excluded index of the system.
+
+ Return:
+ Tensor, excluded index.
+ """
self._exclude_index = self._check_exclude_index(exclude_index)
return self._exclude_index
def set_pbc(self, use_pbc: bool = None):
- """set PBC box"""
+ """
+ Set whether to use periodic boundary condition.
+
+ Args:
+ use_pbc(bool): Whether to use periodic boundary condition.
+ """
self._use_pbc = use_pbc
self.get_vector.set_pbc(use_pbc)
self.get_distance.set_pbc(use_pbc)
@@ -135,7 +172,7 @@ class PotentialCell(EnergyCell):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -148,8 +185,8 @@ class PotentialCell(EnergyCell):
Index of neighbour atoms. Default: None
neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
Mask for neighbour atoms. Default: None
- neighbour_coord (Tensor): Tensor of shape (B, A, N, D). Data type is bool.
- Position coorindates of neighbour atoms.
+ neighbour_vector (Tensor): Tensor of shape (B, A, N, D). Data type is bool.
+ Vectors from central atom to neighbouring atoms.
neighbour_distances (Tensor): Tensor of shape (B, A, N). Data type is float.
Distance between neighbours atoms. Default: None
pbc_box (Tensor): Tensor of shape (B, D). Data type is float.
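Since the construct signature of PotentialCell now takes neighbour_vector instead of neighbour_coord, subclasses must be updated accordingly. Below is a minimal, hypothetical subclass written against the new signature; the import paths and the use of keepdims_sum mirror the in-tree code but are assumptions, and the potential itself is only a toy, not part of the patch.

import mindspore as ms
from mindspore import Tensor
from mindspore.ops import functional as F
from mindsponge.potential import PotentialCell   # assumed import path
from mindsponge.function import keepdims_sum     # assumed import path


class HarmonicOrigin(PotentialCell):
    """Toy potential: harmonic restraint of every atom to the origin."""

    def __init__(self, k: float = 10.0, name: str = 'harmonic_origin'):
        super().__init__(num_energies=1, name=name)
        self.k = k

    def construct(self,
                  coordinate: Tensor,
                  neighbour_index: Tensor = None,
                  neighbour_mask: Tensor = None,
                  neighbour_vector: Tensor = None,
                  neighbour_distance: Tensor = None,
                  pbc_box: Tensor = None):
        # (B, A) <- (B, A, D): squared distance of each atom from the origin
        r2 = F.reduce_sum(F.square(coordinate), -1)
        # (B, 1) <- (B, A): one energy term per walker
        return 0.5 * self.k * keepdims_sum(r2, -1)


potential = HarmonicOrigin()
coordinate = Tensor([[[0.1, 0.0, 0.0], [0.0, 0.2, 0.0]]], ms.float32)  # (B=1, A=2, D=3)
print(potential(coordinate))  # Tensor of shape (1, 1)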
diff --git a/MindSPONGE/mindsponge/python/potential/toys/__init__.py b/MindSPONGE/mindsponge/python/potential/toys/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb9814fb13b827484f3aecbf47efba6c41599d58
--- /dev/null
+++ b/MindSPONGE/mindsponge/python/potential/toys/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
+# Peking University &
+# Huawei Technologies Co., Ltd
+#
+# This code is a part of MindSPONGE:
+# MindSpore Simulation Package tOwards Next Generation molecular modelling.
+#
+# MindSPONGE is open-source software based on the AI-framework:
+# MindSpore (https://www.mindspore.cn/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Toy model potentials"""
+
+from .tb import TiwaryBerne
+
+__all__ = ['TiwaryBerne']
diff --git a/MindSPONGE/mindsponge/python/potential/toys/tb.py b/MindSPONGE/mindsponge/python/potential/toys/tb.py
new file mode 100644
index 0000000000000000000000000000000000000000..c25f2702c879489a86ddcc0554a7d7a2958e1e8d
--- /dev/null
+++ b/MindSPONGE/mindsponge/python/potential/toys/tb.py
@@ -0,0 +1,180 @@
+# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
+# Peking University &
+# Huawei Technologies Co., Ltd
+#
+# This code is a part of MindSPONGE:
+# MindSpore Simulation Package tOwards Next Generation molecular modelling.
+#
+# MindSPONGE is open-source software based on the AI-framework:
+# MindSpore (https://www.mindspore.cn/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Potential"""
+
+from typing import Union, List, Tuple
+import numpy as np
+from numpy import ndarray
+
+import mindspore as ms
+from mindspore import Tensor
+from mindspore import ops
+from mindspore.ops import functional as F
+
+from .. import PotentialCell
+from ...function import get_ms_array, keepdims_sum
+
+
+class TiwaryBerne(PotentialCell):
+ r"""Potential energy of a toy model developed by Tiwary and Berne.
+
+ Reference:
+
+ Tiwary, P.; Berne, B. J.
+ Predicting Reaction Coordinates in Energy Landscapes with Diffusion Anisotropy [J].
+ The Journal of Chemical Physics, 2017, 147(15): 152701.
+
+ Args:
+
+ location: Union[Tensor, ndarray, List[float], Tuple[float]]:
+ Array of location(s) of metastable state(s) on the potential energy surface (PES).
+ The shape of the array is `(S, D)`, and the data type is float.
+ Default: ((-0.5, 0.5), (0.8, 1.2), (0.5, -0.3))
+
+ depth: Union[Tensor, ndarray, List[float], Tuple[float]]:
+ Array of depth of metastable state(s) on the potential energy surface (PES).
+ The shape of the array is `(S)`, and the data type is float.
+ Default: (16, 18, 16)
+
+ name (str): Name of the energy. Default: 'tiwary_berne'
+
+ Returns:
+
+ energy (Tensor): Tensor of shape `(B, 1)`. Data type is float.
+
+ Supported Platforms:
+
+ ``Ascend`` ``GPU``
+
+ Symbols:
+
+ S: Number of metastable state(s).
+ D: Spatial dimension of the toy model. Usually is 2.
+
+ """
+ def __init__(self,
+ location: Union[Tensor, ndarray, List[float], Tuple[float]] = ((-0.5, 0.5),
+ (0.8, 1.2),
+ (0.5, -0.3)),
+ depth: Union[Tensor, ndarray, List[float], Tuple[float]] = (16, 18, 16),
+ name: str = 'tiwary_berne',
+ ):
+
+ super().__init__(
+ num_energies=1,
+ name=name,
+ )
+
+ # (S, D)
+ self.location: Tensor = get_ms_array(location, ms.float32)
+ self.dimension = self.location.shape[-1]
+
+ if self.location.ndim == 1:
+ # (1, D) <- (D)
+ self.location = F.expand_dims(self.location, 0)
+
+ # S
+ num_states = self.location.shape[-2]
+
+ # (S)
+ self.depth: Tensor = get_ms_array(depth, ms.float32)
+ if self.depth.shape[-1] != num_states and self.depth.shape[-1] != 1:
+ raise ValueError(f'The number of depth {self.depth.shape[-1]} does not match '
+ f'the number of states {num_states}')
+
+ self.split = ops.Split(-1, 2)
+
+ def get_contour_2d(self,
+ vmin: float = -1,
+ vmax: float = 1.5,
+ num_grids: int = 50,
+ ) -> Tuple[ndarray, ndarray, ndarray]:
+ """get the data to plot the counter of PES for 2-D system"""
+ if self.dimension != 2:
+ raise ValueError(f'The function `get_contour_2d` can only be used in a 2-D system, '
+ f'but the dimension of the potential energy is {self.dimension}.')
+
+ grids = np.linspace(vmin, vmax, num_grids)
+ x, y = np.meshgrid(grids, grids)
+ coordinate = np.stack((x.ravel(), y.ravel()), 1).astype(np.float32)  # match the dtype of self.location
+ coordinate = np.expand_dims(coordinate, -2)
+ z = self.construct(Tensor.from_numpy(coordinate))
+ z = F.reshape(z, (num_grids, num_grids)).asnumpy()
+ z -= np.min(z)
+
+ return x, y, z
+
+ def construct(self,
+ coordinate: Tensor,
+ neighbour_index: Tensor = None,
+ neighbour_mask: Tensor = None,
+ neighbour_vector: Tensor = None,
+ neighbour_distance: Tensor = None,
+ pbc_box: Tensor = None
+ ) -> Tensor:
+ r"""Calculate potential energy.
+
+ Args:
+ coordinate (Tensor): Tensor of shape (B, A, 2). Data type is float.
+ Position coordinate of atoms in system.
+ neighbour_index (Tensor): Tensor of shape (B, A, N). Data type is int.
+ Index of neighbour atoms. Default: None
+ neighbour_mask (Tensor): Tensor of shape (B, A, N). Data type is bool.
+ Mask for neighbour atoms. Default: None
+ neighbour_vector (Tensor): Tensor of shape (B, A, N, 2). Data type is float.
+ Vectors from central atom to neighbouring atoms.
+ neighbour_distance (Tensor): Tensor of shape (B, A, N). Data type is float.
+ Distances between neighbouring atoms. Default: None
+ pbc_box (Tensor): Tensor of shape (B, 2). Data type is float.
+ Tensor of PBC box. Default: None
+
+ Returns:
+ potential (Tensor): Tensor of shape (B, E). Data type is float.
+
+ Symbols:
+ B: Batchsize, i.e. number of walkers in simulation
+ A: Number of atoms.
+ N: Maximum number of neighbour atoms.
+ E: Number of energy terms.
+
+ """
+ #pylint: disable=unused-argument
+
+ # (B, A, S, D) = (B, A, 1, D) - (S, D)
+ diff = F.expand_dims(coordinate, -2) - self.location
+
+ # (B, A, S) <- (B, A, S, D)
+ diff2: Tensor = F.reduce_sum(F.square(diff), -1)
+
+ # (B, A, S) = (S) * (B, A, S)
+ energy = -1 * self.depth * F.exp(-2 * diff2)
+ # (B, A) <- (B, A, S)
+ energy = F.reduce_sum(energy, -1)
+
+ # (B, A) <- (B, A, D)
+ restraint = 0.5 * F.reduce_sum(F.pow(coordinate, 6), -1)
+
+ # (B, A)
+ energy += restraint
+ # (B, 1) <- (B, A)
+ return keepdims_sum(energy, -1)
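A short usage sketch for the new toy potential (not part of the patch): the import path mindsponge.potential.toys is assumed from the package layout, and matplotlib is assumed to be available for plotting the contour returned by get_contour_2d.

import numpy as np
import matplotlib.pyplot as plt
from mindspore import Tensor
from mindsponge.potential.toys import TiwaryBerne  # assumed import path

potential = TiwaryBerne()  # default three metastable states

# Energy of a single walker with one particle in 2-D: shape (B=1, A=1, D=2)
coordinate = Tensor(np.array([[[-0.5, 0.5]]], np.float32))
print(potential(coordinate))  # Tensor of shape (1, 1)

# Contour of the potential energy surface
x, y, z = potential.get_contour_2d(vmin=-1, vmax=1.5, num_grids=50)
plt.contourf(x, y, z, levels=30)
plt.colorbar()
plt.show()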
diff --git a/MindSPONGE/mindsponge/python/sampling/bias/metad.py b/MindSPONGE/mindsponge/python/sampling/bias/metad.py
index 4d52df7b7bd032df6fec4223f7b9817b8713ab3b..05af0362734269c444f0cca026572624dcf1946b 100644
--- a/MindSPONGE/mindsponge/python/sampling/bias/metad.py
+++ b/MindSPONGE/mindsponge/python/sampling/bias/metad.py
@@ -547,7 +547,7 @@ class Metadynamics(Bias):
coordinate: Tensor,
neighbour_index: Tensor = None,
neighbour_mask: Tensor = None,
- neighbour_coord: Tensor = None,
+ neighbour_vector: Tensor = None,
neighbour_distance: Tensor = None,
pbc_box: Tensor = None
):
@@ -560,8 +560,8 @@ class Metadynamics(Bias):
Index of neighbour atoms. Default: None
neighbour_mask (Tensor): Tensor of shape `(B, A, N)`. Data type is bool.
Mask for neighbour atoms. Default: None
- neighbour_coord (Tensor): Tensor of shape `(B, A, N)`. Data type is bool.
- Position coorindates of neighbour atoms.
+ neighbour_vector (Tensor): Tensor of shape `(B, A, N, D)`. Data type is float.
+ Vectors from central atom to neighbouring atoms.
neighbour_distance (Tensor): Tensor of shape `(B, A, N)`. Data type is float.
Distance between neigh_shift atoms. Default: None
pbc_box (Tensor): Tensor of shape `(B, D)`. Data type is float.
diff --git a/MindSPONGE/mindsponge/python/sampling/modifier/__init__.py b/MindSPONGE/mindsponge/python/sampling/modifier/__init__.py
index d9487c5a993ff9fbb5cf23bd346198fb4c138ef3..8303f670aaeabfb1610c704fedcbb26cb8df23c9 100644
--- a/MindSPONGE/mindsponge/python/sampling/modifier/__init__.py
+++ b/MindSPONGE/mindsponge/python/sampling/modifier/__init__.py
@@ -23,6 +23,7 @@
"""Force modifier"""
from .modifier import ForceModifier
+from .mask import MaskedDriven
from .its import ModiferITS
-__all__ = ['ForceModifier', 'ModiferITS']
+__all__ = ['ForceModifier', 'MaskedDriven', 'ModiferITS']
diff --git a/MindSPONGE/mindsponge/python/sampling/modifier/mask.py b/MindSPONGE/mindsponge/python/sampling/modifier/mask.py
new file mode 100644
index 0000000000000000000000000000000000000000..968c8ae3d01e8fdfe36e42c1542097b9c2814ab1
--- /dev/null
+++ b/MindSPONGE/mindsponge/python/sampling/modifier/mask.py
@@ -0,0 +1,122 @@
+# Copyright 2021-2023 @ Shenzhen Bay Laboratory &
+# Peking University &
+# Huawei Technologies Co., Ltd
+#
+# This code is a part of MindSPONGE:
+# MindSpore Simulation Package tOwards Next Generation molecular modelling.
+#
+# MindSPONGE is open-source software based on the AI-framework:
+# MindSpore (https://www.mindspore.cn/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Force modifier"""
+
+from typing import Union, Tuple
+from numpy import ndarray
+from mindspore import bool_
+from mindspore import Tensor
+from mindspore.ops import functional as F
+
+from .modifier import ForceModifier
+from ...function import get_ms_array
+
+
+class MaskedDriven(ForceModifier):
+ r"""Only drive part of atoms via modifying atomic force.
+
+ Args:
+
+ mask (Union[Tensor, ndarray]): Array of atomic mask to calculate the force.
+ The shape of array is `(A)` or `(B, A)`, and the type is bool.
+
+ update_pace (int): Frequency for updating the modifier. Default: 0
+
+ length_unit (str): Length unit. If None is given, it will be assigned with the global length unit.
+ Default: None
+
+ energy_unit (str): Energy unit. If None is given, it will be assigned with the global energy unit.
+ Default: None
+
+ Supported Platforms:
+
+ ``Ascend`` ``GPU``
+
+ """
+ def __init__(self,
+ mask: Union[Tensor, ndarray],
+ update_pace: int = 0,
+ length_unit: str = None,
+ energy_unit: str = None,
+ ):
+ super().__init__(
+ update_pace=update_pace,
+ length_unit=length_unit,
+ energy_unit=energy_unit
+ )
+
+ # (A) or (B, A)
+ self.mask = get_ms_array(mask, bool_)
+
+ def construct(self,
+ energy: Tensor = 0,
+ energy_ad: Tensor = 0,
+ force: Tensor = 0,
+ force_ad: Tensor = 0,
+ virial: Tensor = None,
+ virial_ad: Tensor = None,
+ ) -> Tuple[Tensor, Tensor, Tensor]:
+ r"""
+ Aggregate the atomic forces and apply the atomic mask.
+
+ Args:
+ energy (Tensor): Tensor of shape (B, 1). Data type is float.
+ Potential energy from ForceCell.
+ energy_ad (Tensor): Tensor of shape (B, 1). Data type is float.
+ Potential energy from EnergyCell.
+ force (Tensor): Tensor of shape (B, A, D). Data type is float.
+ Atomic forces from ForceCell.
+ force_ad (Tensor): Tensor of shape (B, A, D). Data type is float.
+ Atomic forces calculated by automatic differentiation.
+ virial (Tensor): Tensor of shape (B, D). Data type is float.
+ Virial calculated from ForceCell.
+ virial_ad (Tensor): Tensor of shape (B, D). Data type is float.
+ Virial calculated by automatic differentiation.
+
+ Returns:
+ energy (Tensor): Tensor of shape (B, 1). Data type is float.
+ Total potential energy for simulation.
+ force (Tensor): Tensor of shape (B, A, D). Data type is float.
+ Total atomic force for simulation.
+ virial (Tensor): Tensor of shape (B, D). Data type is float.
+ Total virial for simulation.
+
+ Symbols:
+ B: Batchsize, i.e. number of walkers in simulation
+ A: Number of atoms.
+ D: Spatial dimension of the simulation system. Usually is 3.
+ """
+
+ force = force + force_ad
+ energy = energy + energy_ad
+
+ if virial is not None or virial_ad is not None:
+ if virial is None:
+ virial = 0
+ if virial_ad is None:
+ virial_ad = 0
+ virial = virial + virial_ad
+
+ # (B, A, D) * (A, 1) OR (B, A, D) * (B, A, 1)
+ force = force * F.expand_dims(self.mask, -1)
+ return energy, force, virial
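A minimal sketch of how the new MaskedDriven modifier behaves (shapes and mask values are hypothetical, and the import path is assumed): forces on atoms whose mask entry is False are zeroed, while the energies are simply summed.

import numpy as np
from mindspore import Tensor
from mindsponge.sampling.modifier import MaskedDriven  # assumed import path

mask = np.array([True, True, False, False])        # drive only the first two atoms
modifier = MaskedDriven(mask)

energy = Tensor(np.zeros((1, 1), np.float32))      # (B, 1) from ForceCell
energy_ad = Tensor(np.zeros((1, 1), np.float32))   # (B, 1) from EnergyCell
force = Tensor(np.zeros((1, 4, 3), np.float32))    # (B, A, D) from ForceCell
force_ad = Tensor(np.ones((1, 4, 3), np.float32))  # (B, A, D) from automatic differentiation

energy_out, force_out, virial_out = modifier(energy, energy_ad, force, force_ad)
print(force_out)    # forces on the last two atoms are zeroed
print(virial_out)   # None, since no virial was provided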
diff --git a/MindSPONGE/mindsponge/python/system/molecule/molecule.py b/MindSPONGE/mindsponge/python/system/molecule/molecule.py
index ef79dd31cb86108d45fdd9c6f8c16b67098010f5..9e195a34247ce1ffc8fc4b94cd36a9da8ae96606 100644
--- a/MindSPONGE/mindsponge/python/system/molecule/molecule.py
+++ b/MindSPONGE/mindsponge/python/system/molecule/molecule.py
@@ -44,75 +44,60 @@ from ...colvar.atoms import AtomsBase
from ...colvar.atoms import get_atoms as _get_atoms
from ...function import functions as func
from ...function.units import Units, GLOBAL_UNITS
-from ...function.functions import get_ms_array, get_ndarray, keepdims_prod
+from ...function.functions import get_ms_array, get_ndarray, get_arguments, keepdims_prod
class Molecule(Cell):
- r"""Base class for molecular system, used as the "system module" in MindSPONGE.
-
- The `Molecule` Cell can represent a molecule or a system consisting of multiple molecules.
-
- The major components of the `Molecule` Cell is the `Residue` Cell. A `Molecule` Cell can
- contain multiple `Residue` Cells.
+ r"""
+ Base class for molecular system, used as the "system module" in MindSPONGE.
+ The `Molecule` Cell can represent a molecule or a system consisting of multiple molecules.
+ The major component of the `Molecule` Cell is the `Residue` Cell. A `Molecule` Cell can
+ contain multiple `Residue` Cells.
Args:
-
- atoms (Union[List[Union[str, int]], ndarray]):
- Array of atoms. The data in array can be str of atom name.
- or int of atomic number. Defulat: None
-
- atom_name (Union[List[str], ndarray]):
- Array of atom name with data type `str`. Defulat: None
-
- atom_type (Union[List[str], ndarray]):
- Array of atom type with data type `str`. Defulat: None
-
- atom_mass (Union[Tensor, ndarray, List[float]]):
- Array of atom mass of shape `(B, A)` with data type `float`. Defulat: None
-
- atom_charge (Union[Tensor, ndarray, List[float]]):
- Array of atom charge of shape `(B, A)` with data type `float`. Defulat: None
-
- atomic_number (Union[Tensor, ndarray, List[float]]):
- Array of atomic number of shape `(B, A)` with data type `int`. Defulat: None
-
- bond (Union[Tensor, ndarray, List[int]]):
- Array of bond connection of shape `(B, b, 2)` with data type `int`. Defulat: None
-
- coordinate (Union[Tensor, ndarray, List[float]]):
- Tensor of atomic coordinates :math:`R` of shape `(B, A, D)` with data type `float`.
- Default: None
-
- pbc_box (Union[Tensor, ndarray, List[float]]):
- Tensor of box size :math:`\vec{L}` of periodic boundary condition (PBC).
- The shape of tensor is `(B, D)`, and the data type is `float`.
- Default: None
-
- template (Union[dict, str, List[Union[dict, str]]]):
- Template for molecule. It can be a `dict` in MindSPONGE template format
- or a `str` for the filename of a MindSPONGE template file. If a `str` is given,
- it will first look for a file with the same name in the current directory.
- If the file does not exist, it will search in the built-in template directory
- of MindSPONGE (`mindsponge.data.template`).
- Default: None.
-
- length_unit (str): Length unit. If `None` is given, the global length units will be used.
- Default: None
+ atoms(Union[List[Union[str, int]], ndarray]): Array of atoms. The data in array can be str of atom
+ name or int of atomic number. Default: ``None``.
+ atom_name(Union[List[str], ndarray]): Array of atom name with data type `str`. Default: ``None``.
+ atom_type(Union[List[str], ndarray]): Array of atom type with data type `str`. Default: ``None``.
+ atom_mass(Union[Tensor, ndarray, List[float]]): Array of atom mass of shape `(B, A)` with data type
+ `float`. Default: ``None``.
+ atom_charge(Union[Tensor, ndarray, List[float]]): Array of atom charge of shape `(B, A)` with data type
+ `float`. Default: ``None``.
+ atomic_number(Union[Tensor, ndarray, List[float]]): Array of atomic number of shape `(B, A)` with data type
+ `int`. Default: ``None``.
+ bond(Union[Tensor, ndarray, List[int]]): Array of bond connection of shape `(B, b, 2)` with data
+ type `int`. Default: ``None``.
+ coordinate(Union[Tensor, ndarray, List[float]]): Tensor of atomic coordinates :math:`R` of shape
+ :math:`(B, A, D)` with data type `float`. Default: ``None``.
+ pbc_box(Union[Tensor, ndarray, List[float]]): Tensor of box size :math:`\vec{L}` of periodic boundary
+ condition (PBC). The shape of tensor is :math:`(B, D)`,
+ and the data type is `float`. Default: ``None``.
+ template(Union[dict, str, List[Union[dict, str]]]): Template for molecule. It can be a `dict` in MindSPONGE
+ template format or a `str` for the filename of a
+ MindSPONGE template file. If a `str` is given,
+ it will first look for a file with the same name in the
+ current directory. If the file does not exist, it will
+ search in the built-in template directory of
+ MindSPONGE (`mindsponge.data.template`).
+ Default: ``None``.
+ residue(Union[Residue, List[Residue]]): Residue or a list of residues. If template is not ``None``,
+ only the residues in the template will be used.
+ Default: ``None``.
+ length_unit(str): Length unit. If ``None`` is given, the global length
+ units will be used. Default: ``None``.
+
+ Outputs:
+ - coordinate, Tensor of shape `(B, A, D)`. Data type is float.
+ - pbc_box, Tensor of shape `(B, D)`. Data type is float.
Supported Platforms:
-
``Ascend`` ``GPU``
Symbols:
-
B: Batchsize, i.e. number of walkers in simulation
-
A: Number of atoms.
-
b: Number of bonds.
-
D: Spatial dimension of the simulation system. Usually is 3.
-
"""
def __init__(self,
@@ -128,9 +113,11 @@ class Molecule(Cell):
template: Union[dict, str] = None,
residue: Union[Residue, List[Residue]] = None,
length_unit: str = None,
+ **kwargs,
):
super().__init__()
+ self._kwargs = get_arguments(locals(), kwargs)
if length_unit is None:
length_unit = GLOBAL_UNITS.length_unit
@@ -238,32 +225,89 @@ class Molecule(Cell):
@property
def shape(self):
+ r"""
+ Shape of atomic coordinate.
+
+ Returns:
+ Tuple, shape of the atomic coordinate.
+ """
return self.coordinate.shape
@property
def ndim(self):
+ r"""
+ Ndim of atomic coordinate.
+
+ Returns:
+ int, number of dims of atomic coordinate.
+ """
return self.coordinate.ndim
@property
def length_unit(self):
+ r"""
+ Length unit.
+
+ Returns:
+ str, length unit.
+ """
return self.units.length_unit
+ @property
+ def heavy_atom_mask(self):
+ r"""
+ Mask for heavy (non-hydrogen) atoms.
+
+ Returns:
+ Tensor, mask for heavy atoms.
+ """
+ return msnp.where(self.atomic_number[0] > 1, 0, 1)
+
def convert_length_from(self, unit) -> float:
- """convert length from a specified units."""
+ """
+ Convert length from the specified unit.
+
+ Args:
+ unit(Union[str, Units, Length, float, int]): Length unit.
+
+ Returns:
+ float, length converted from the specified unit.
+ """
return self.units.convert_length_from(unit)
def convert_length_to(self, unit) -> float:
- """convert length to a specified units."""
+ """
+ Convert length to the specified unit.
+
+ Args:
+ unit(Union[str, Units, Length, float, int]): Length unit.
+
+ Returns:
+ float, length converted to the specified unit.
+ """
return self.units.convert_length_to(unit)
def move(self, shift: Tensor = None):
- """move the coordinate of the system"""
+ """
+ Move the coordinate of the system.
+
+ Args:
+ shift(Tensor): The displacement of the system. Default: None
+ """
if shift is not None:
self.update_coordinate(self.coordinate + Tensor(shift, ms.float32))
return self
def copy(self, shift: Tensor = None):
- """return a Molecule that copy the parameters of this molecule"""
+ """
+ Return a Molecule that copies the parameters of this molecule.
+
+ Args:
+ shift(Tensor): The displacement distance of the system. Default: None
+
+ Returns:
+ Molecule, a new Molecule that copies the parameters of this molecule.
+ """
coordinate = self.get_coordinate()
if shift is not None:
coordinate += Tensor(shift, ms.float32)
@@ -275,7 +319,13 @@ class Molecule(Cell):
)
def add_residue(self, residue: Residue, coordinate: Tensor = None):
- """add residue"""
+ """
+ Add residue to this molecule system.
+
+ Args:
+ residue(Union[Residue, List[Residue]]): Residue or list of residues to be added to the system.
+ coordinate(Tensor): The coordinate of the input residue. Default: None
+ """
if not isinstance(residue, list):
if isinstance(residue, Residue):
residue = [residue]
@@ -295,14 +345,24 @@ class Molecule(Cell):
return self
def append(self, system):
- """append the system"""
+ """
+ Append a system to this molecule system.
+
+ Args:
+ system(Molecule): Another molecule system to be appended to this one.
+ """
if not isinstance(system, Molecule):
raise TypeError(f'For append, the type of system must be "Molecule" but got: {type(system)}')
self.add_residue(system.residue, system.get_coordinate())
return self
def reduplicate(self, shift: Tensor):
- """duplicate the system to double of the origin size"""
+ """
+ Duplicate the system to double its original size.
+
+ Args:
+ shift(Tensor): The shift of the duplicated copy relative to the original system.
+ """
shift = Tensor(shift, ms.float32)
self.residue.extend(copy.deepcopy(self.residue))
self.build_system()
@@ -311,7 +371,7 @@ class Molecule(Cell):
return self
def build_atom_type(self):
- """build atom type"""
+ """Build atom type."""
atom_type = ()
for i in range(self.num_residue):
atom_type += (self.residue[i].atom_type,)
@@ -319,7 +379,7 @@ class Molecule(Cell):
return self
def build_atom_charge(self):
- """build atom charge"""
+ """Build atom charge."""
charges = []
for i in range(self.num_residue):
charges.append(self.residue[i].atom_charge is not None)
@@ -335,7 +395,7 @@ class Molecule(Cell):
return self
def build_system(self):
- """build the system by residues"""
+ """Build the system by residues."""
if self.residue is None:
self.residue = None
return self
@@ -459,7 +519,15 @@ class Molecule(Cell):
return self
def build_space(self, coordinate: Tensor, pbc_box: Tensor = None):
- """build coordinate and PBC box"""
+ """
+ Build coordinate and PBC box.
+
+ Args:
+ coordinate(Tensor): The initial coordinate of the system. If it is None, the system will
+ generate a random coordinate as its initial coordinate.
+ pbc_box(Tensor): The initial PBC box of the system. If it is None, the system will not use a PBC box.
+ Default: None
+ """
# (B, A, D)
if coordinate is None:
coordinate = np.random.uniform(0, self.units.length(
@@ -508,7 +576,12 @@ class Molecule(Cell):
return self
def set_bond_length(self, bond_length: Tensor):
- """set bond length"""
+ """
+ Set bond length.
+
+ Args:
+ bond_length(Tensor): Set the bond length of the system.
+ """
if self.bond is None:
raise ValueError('Cannot setup bond_length because bond is None')
bond_length = Tensor(bond_length, ms.float32)
@@ -519,44 +592,104 @@ class Molecule(Cell):
return self
def residue_index(self, res_id: int) -> Tensor:
- """get index of residue"""
+ """
+ Get index of residue.
+
+ Args:
+ res_id(int): Residue index.
+
+ Returns:
+ Tensor, the system index of the residue.
+ """
return self.residue[res_id].system_index
def residue_bond(self, res_id: int) -> Tensor:
- """get bond index of residue"""
+ """
+ Get bond index of residue.
+
+ Args:
+ res_id(int): Residue index.
+
+ Returns:
+ Tensor, the bond index of residue.
+ """
if self.residue[res_id].bond is None:
return None
return self.residue[res_id].bond + self.residue[res_id].start_index
def residue_head(self, res_id: int) -> Tensor:
- """get head index of residue"""
+ """
+ Get head index of residue.
+
+ Args:
+ res_id(int): Residue index.
+
+ Returns:
+ Tensor, the head index of residue.
+ """
if self.residue[res_id].head_atom is None:
return None
return self.residue[res_id].head_atom + self.residue[res_id].start_index
def residue_tail(self, res_id: int) -> Tensor:
- """get tail index of residue"""
+ """
+ Get tail index of residue.
+
+ Args:
+ res_id(int): Residue index.
+
+ Returns:
+ Tensor, the tail index of residue.
+ """
if self.residue[res_id].tail_atom is None:
return None
return self.residue[res_id].tail_atom + self.residue[res_id].start_index
def residue_coordinate(self, res_id: int) -> Tensor:
- """get residue coordinate"""
+ """
+ Get residue coordinate.
+
+ Args:
+ res_id(int): Residue index.
+
+ Returns:
+ Tensor, residue coordinate in the system.
+ """
return F.gather_d(self.coordinate, -2, self.residue[res_id].system_index)
def get_volume(self) -> Tensor:
- """get volume of system"""
+ """
+ Get volume of system.
+
+ Returns:
+ Tensor, the volume of the system. If pbc_box is not used, the volume is None.
+ """
if self.pbc_box is None:
return None
return keepdims_prod(self.pbc_box, -1)
def space_parameters(self) -> list:
- """get the parameter of space (coordinates and pbc box)"""
+ """
+ Get the parameter of space (coordinates and pbc box).
+
+ Returns:
+ list[Tensor], coordinate and pbc_box. If pbc_box is not used, it will only return coordinate.
+ """
if self.pbc_box is None:
return [self.coordinate]
return [self.coordinate, self.pbc_box]
def trainable_params(self, recurse=True) -> list:
+ """
+ Trainable parameters.
+
+ Args:
+ recurse(bool): If True, yields parameters of this cell and all subcells. Otherwise, only yields
+ parameters that are direct members of this cell. Default: True
+
+ Returns:
+ list, all trainable system parameters.
+ """
return list(filter(lambda x: x.name.split('.')[-1] == 'coordinate', self.get_parameters(expand=recurse)))
def _check_coordianate(self, coordinate: Tensor) -> Tensor:
@@ -575,14 +708,30 @@ class Molecule(Cell):
return coordinate
def update_coordinate(self, coordinate: Tensor) -> Tensor:
- """update the parameter of coordinate"""
+ """
+ Update the parameter of coordinate.
+
+ Args:
+ coordinate(Tensor): Coordinates used to update system coordinates.
+
+ Returns:
+ Tensor, updated coordinate.
+ """
coordinate = F.assign(self.coordinate, coordinate)
if self.pbc_box is None:
return coordinate
return F.depend(coordinate, self.update_image())
def set_coordianate(self, coordinate: Tensor) -> Tensor:
- """set the value of coordinate"""
+ """
+ Set the value of coordinate.
+
+ Args:
+ coordinate(Tensor): Coordinates used to set system coordinates.
+
+ Returns:
+ Tensor, the coordinate of the system.
+ """
coordinate = self._check_coordianate(coordinate)
if coordinate is not None and coordinate.shape == self.coordinate.shape:
return self.update_coordinate(coordinate)
@@ -592,19 +741,44 @@ class Molecule(Cell):
return self.identity(coordinate)
def update_pbc_box(self, pbc_box: Tensor) -> Tensor:
- """update PBC box"""
+ """
+ Update PBC box.
+
+ Args:
+ pbc_box(Tensor): PBC box used to update the system PBC box.
+
+ Returns:
+ Tensor, updated system PBC box.
+ """
pbc_box = F.assign(self.pbc_box, pbc_box)
return F.depend(pbc_box, self.update_image())
def set_pbc_grad(self, grad_box: bool) -> bool:
- """set whether to calculate the gradient of PBC box"""
+ """
+ Set whether to calculate the gradient of PBC box.
+
+ Args:
+ grad_box(bool): Whether to calculate the gradient of PBC box.
+
+ Returns:
+ bool, whether to calculate the gradient of PBC box.
+ """
if self.pbc_box is None:
return grad_box
self.pbc_box.requires_grad = grad_box
return self.pbc_box.requires_grad
def set_pbc_box(self, pbc_box: Tensor = None) -> Tensor:
- """set PBC box"""
+ """
+ Set PBC box.
+
+ Args:
+ pbc_box(Tensor): Set the PBC box of the system. If it's None, the system won't use PBC box.
+ Default: None
+
+ Returns:
+ Tensor, system PBC box.
+ """
if pbc_box is None:
self.pbc_box = None
self.use_pbc = False
@@ -633,7 +807,12 @@ class Molecule(Cell):
return self.pbc_box
def repeat_box(self, lattices: list):
- """repeat the system according to the lattices of PBC box"""
+ """
+ Repeat the system according to the lattices of PBC box.
+
+ Args:
+ lattices(list): Number of replications of the PBC box along each dimension.
+ """
if self.pbc_box is None:
raise RuntimeError('repeat_box() cannot be used without pbc_box, '
'please use set_pbc_box() to set pbc_box first '
@@ -673,13 +852,30 @@ class Molecule(Cell):
return self
def coordinate_in_pbc(self, shift: float = 0) -> Tensor:
- """get the coordinate in a whole PBC box"""
+ """
+ Get the coordinate in a whole PBC box.
+
+ Args:
+ shift(float): Offset ratio relative to box size. Default: 0
+
+ Returns:
+ Tensor, the coordinate in the PBC box. Shape `(B, ..., D)`. Data type is float.
+ """
coordinate = self.identity(self.coordinate)
pbc_box = self.identity(self.pbc_box)
return func.coordinate_in_pbc(coordinate, pbc_box, shift)
def calc_image(self, shift: float = 0) -> Tensor:
- """calculate the image of coordinate"""
+ r"""
+ Calculate the image of coordinate.
+
+ Args:
+ shift(float): Offset ratio :math:`c` relative to box size :math:`\vec{L}`.
+ Default: ``0``.
+
+ Returns:
+ Tensor, the image of coordinate.
+ """
coordinate = self.identity(self.coordinate)
pbc_box = self.identity(self.pbc_box)
image = func.pbc_image(coordinate, pbc_box, shift)
@@ -688,13 +884,26 @@ class Molecule(Cell):
return image
def update_image(self, image: Tensor = None) -> bool:
- """update the image of coordinate"""
+ """
+ Update the image of coordinate.
+
+ Args:
+ image(Tensor): The image of coordinate used to update the image of system coordinate. Default: None
+
+ Returns:
+ bool, whether successfully update the image of coordinate.
+ """
if image is None:
image = self.calc_image()
return F.assign(self.image, image)
def set_length_unit(self, unit):
- """set the length unit of system"""
+ """
+ Set the length unit of system.
+
+ Args:
+ unit(Union[str, Units, Length, float, int]): Length unit.
+ """
scale = self.units.convert_length_to(unit)
coordinate = self.coordinate * scale
self.update_coordinate(coordinate)
@@ -705,13 +914,29 @@ class Molecule(Cell):
return self
def calc_colvar(self, colvar: Colvar) -> Tensor:
- """calculate the value of specific collective variables in the system"""
+ """
+ Calculate the value of specific collective variables in the system.
+
+ Args:
+ colvar(Colvar): Collective variable (CV) :math:`s(R)` to be calculated.
+
+ Returns:
+ Tensor, the value of the collective variable :math:`s(R)`.
+ """
coordinate = self.identity(self.coordinate)
pbc_box = None if self.pbc_box is None else self.identity(self.pbc_box)
return colvar(coordinate, pbc_box)
def get_atoms(self, atoms: Union[Tensor, Parameter, ndarray, str, list, tuple]) -> AtomsBase:
- """get Atoms from the system"""
+ """
+ Get atoms from the system.
+
+ Args:
+ atoms(Union[Tensor, Parameter, ndarray, str, list, tuple]): Atoms to be obtained from the system.
+
+ Returns:
+ AtomsBase, the atoms or group of atoms.
+ """
try:
atoms = _get_atoms(atoms)
except TypeError:
@@ -720,7 +945,16 @@ class Molecule(Cell):
return atoms
def get_coordinate(self, atoms: AtomsBase = None) -> Tensor:
- """get Tensor of coordinate"""
+ """
+ Get Tensor of coordinate.
+
+ Args:
+ atoms(AtomsBase): Specific group of atoms whose coordinate will be returned.
+ If None, the coordinate of the whole system is returned. Default: None
+
+ Returns:
+ Tensor, coordinate. Data type is float.
+ """
coordinate = self.identity(self.coordinate)
if atoms is None:
return coordinate
@@ -728,7 +962,12 @@ class Molecule(Cell):
return atoms(coordinate, pbc_box)
def get_pbc_box(self) -> Tensor:
- """get Tensor of PBC box"""
+ """
+ Get Tensor of PBC box.
+
+ Returns:
+ Tensor, PBC box.
+ """
if self.pbc_box is None:
return None
return self.identity(self.pbc_box)
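A brief usage sketch of the documented Molecule interface (values follow typical MindSPONGE tutorial examples; the top-level import is assumed): build a three-atom water-like system, inspect the new heavy_atom_mask property, then double the system with reduplicate().

from mindsponge import Molecule  # assumed top-level export

system = Molecule(
    atoms=['O', 'H', 'H'],
    coordinate=[[0.0, 0.0, 0.0], [0.1, 0.0, 0.0], [-0.0333, 0.0943, 0.0]],
    bond=[[[0, 1], [0, 2]]],
)
print(system.shape)            # (1, 3, 3): one walker, three atoms, three dimensions
print(system.heavy_atom_mask)  # per-atom mask separating hydrogen from heavier atoms

system.reduplicate([0.3, 0.0, 0.0])  # append a copy shifted by 0.3 nm along x
print(system.shape)                  # (1, 6, 3)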
diff --git a/MindSPONGE/mindsponge/python/system/molecule/protein.py b/MindSPONGE/mindsponge/python/system/molecule/protein.py
index 50067dd4fe46c89283abe147f04447be43237274..106c4d9fb84056e76be26fdba95ce5e837dba547 100644
--- a/MindSPONGE/mindsponge/python/system/molecule/protein.py
+++ b/MindSPONGE/mindsponge/python/system/molecule/protein.py
@@ -32,6 +32,7 @@ from .molecule import Molecule
from ..residue.amino import AminoAcid
from ..modelling.hadder import read_pdb
from ...data.template import get_template
+from ...function import get_arguments
backbone_atoms = np.array(['N', 'CA', 'C', 'O'], np.str_)
@@ -93,9 +94,11 @@ class Protein(Molecule):
rebuild_hydrogen: bool = False,
rebuild_suffix: str = '_addH',
length_unit: str = None,
+ **kwargs
):
super().__init__(length_unit=length_unit)
+ self._kwargs = get_arguments(locals(), kwargs)
if pdb is None:
#TODO
diff --git a/MindSPONGE/mindsponge/python/system/residue/amino.py b/MindSPONGE/mindsponge/python/system/residue/amino.py
index b1a275108612209d86f99a70bf549b9a33f4ff54..584e7f8be400a0208932fea9749c08a9433e6597 100644
--- a/MindSPONGE/mindsponge/python/system/residue/amino.py
+++ b/MindSPONGE/mindsponge/python/system/residue/amino.py
@@ -27,6 +27,7 @@ from typing import Union, List
from numpy import ndarray
from mindspore import jit_class
from .residue import Residue
+from ...function import get_arguments
@jit_class
@@ -63,6 +64,7 @@ class AminoAcid(Residue):
template: dict = None,
atom_name: Union[str, List[str], ndarray] = None,
start_index: int = 0,
+ **kwargs,
):
super().__init__(
@@ -71,3 +73,4 @@ class AminoAcid(Residue):
name=(name.replace('HIE', 'HIS') if 'HIE' in name else name),
template=template,
)
+ self._kwargs = get_arguments(locals(), kwargs)
diff --git a/MindSPONGE/mindsponge/python/system/residue/residue.py b/MindSPONGE/mindsponge/python/system/residue/residue.py
index b3f70f01d2c889135e3a8ad63449d52a949b4b32..1cae011dc54b88e01a0eabfe0db997999026c714 100644
--- a/MindSPONGE/mindsponge/python/system/residue/residue.py
+++ b/MindSPONGE/mindsponge/python/system/residue/residue.py
@@ -34,70 +34,54 @@ from mindspore import jit_class
from mindspore.ops import functional as F
from mindspore.common import Tensor
-from ...function.functions import get_integer, get_ms_array
-from ...data.elements import elements, element_set, element_dict, atomic_mass
+from ...function.functions import get_integer, get_ms_array, get_arguments
+from ...data.element import elements, element_set, element_dict, atomic_mass
from ...data.template import get_template, get_template_index
@jit_class
class Residue:
- r"""Base class for residue.
+ r"""
+ Base class for residue.
+ The `Residue` Cell is the component of the `Molecule` (System) Cell.
+ A `Residue` can represent not only an amino acid residue, but also a small molecule in a molecular system,
+ such as a water molecule, an inorganic salt ion, etc. This means that the `Residue` Cell has
+ a similar concept to the "residue" in a PDB file.
- The `Residue` Cell is the component of the `Molecule` (System) Cell.
-
- A `Residue` can represent not only an amino acid residue, but also a small molecule in a molecular system,
- such as a water molecule, an inorganic salt ion, etc. This means that the `Residue` Cell has
- a similar concept to the "residue" in a PDB file.
-
- NOTE: `Residue` Cell is only used to represent the atomic properties and bond connections,
- but does NOT contain atomic coordinates.
+ NOTE: `Residue` Cell is only used to represent the atomic properties and bond connections,
+ but does NOT contain atomic coordinates.
Args:
-
- atom_name (Union[List[str], ndarray]):
- Array of atom name with data type `str`. Defulat: None
-
- atom_type (Union[List[str], ndarray]):
- Array of atom type with data type `str`. Defulat: None
-
- atom_mass (Union[Tensor, ndarray, List[float]]):
- Array of atom mass of shape `(B, A)` with data type `float`. Defulat: None
-
- atom_charge (Union[Tensor, ndarray, List[float]]):
- Array of atom charge of shape `(B, A)` with data type `float`. Defulat: None
-
- atomic_number (Union[Tensor, ndarray, List[float]]):
- Array of atomic number of shape `(B, A)` with data type `int`. Defulat: None
-
- bond (Union[Tensor, ndarray, List[int]]):
- Array of bond connection of shape `(B, b, 2)` with data type `int`. Defulat: None
-
- head_atom (int): Index of the head atom to connect with the previous residue.
- Default: None
-
- tail_atom (int): Index of the tail atom to connect with the next residue.
- Default: None
-
- start_index (int): The start index of the first atom in this residue.
-
- template (Union[dict, str]):
- Template for residue. It can be a `dict` in MindSPONGE template format
- or a `str` for the filename of a MindSPONGE template file. If a `str` is given,
- it will first look for a file with the same name in the current directory.
- If file does not exist, it will search in the built-in template directory
- of MindSPONGE (`mindsponge.data.template`).
- Default: None.
-
- name (str): Name of the residue. Default: 'MOL'
+ atom_name (Union[List[str], ndarray]): Array of atom name with data type `str`. Default: None
+ atom_type (Union[List[str], ndarray]): Array of atom type with data type `str`. Default: None
+ atom_mass (Union[Tensor, ndarray, List[float]]): Array of atom mass of shape `(B, A)` with data type
+ `float`. Default: None
+ atom_charge (Union[Tensor, ndarray, List[float]]): Array of atom charge of shape `(B, A)` with data type
+ `float`. Default: None
+ atomic_number (Union[Tensor, ndarray, List[float]]): Array of atomic number of shape `(B, A)` with data type
+ `int`. Default: None
+ bond (Union[Tensor, ndarray, List[int]]): Array of bond connection of shape `(B, b, 2)` with data
+ type `int`. Default: None
+ head_atom (int): Index of the head atom to connect with the previous
+ residue. Default: None
+ tail_atom (int): Index of the tail atom to connect with the next residue.
+ Default: None
+ start_index (int): The start index of the first atom in this residue.
+ Default: 0
+ name (str): Name of the residue. Default: 'MOL'
+ template (Union[dict, str]): Template for residue. It can be a `dict` in MindSPONGE
+ template format or a `str` for the filename of a
+ MindSPONGE template file. If a `str` is given, it will
+ first look for a file with the same name in the
+ current directory. If file does not exist, it will
+ search in the built-in template directory
+ of MindSPONGE (`mindsponge.data.template`).
+ Default: None.
Symbols:
-
B: Batchsize, i.e. number of walkers in simulation
-
A: Number of atoms.
-
b: Number of bonds.
-
"""
def __init__(self,
@@ -112,7 +96,9 @@ class Residue:
start_index: int = 0,
name: str = 'MOL',
template: Union[dict, str] = None,
+ **kwargs,
):
+ self._kwargs = get_arguments(locals(), kwargs)
self._name = name
@@ -308,6 +294,12 @@ class Residue:
@property
def name(self) -> str:
+ """
+ Get the name of the residue.
+
+ Returns:
+ str, the name of the residue.
+ """
return str(self._name)
@classmethod
@@ -362,31 +354,61 @@ class Residue:
return bond
def build_atom_mass(self, template: dict):
- """build atom mass"""
+ """
+ According to the name of the atom, find the index of the atom in the template.
+ Get atom mass of the atom with the index in the template and build it into the residue.
+
+ Args:
+ template(dict): Template for residue.
+ """
atom_index = get_template_index(template, self.atom_name)
self.atom_mass = Tensor(self._get_atom_mass(template, atom_index), ms.float32)
return self
def build_atomic_number(self, template: dict):
- """build atomic number"""
+ """
+ According to the name of the atom, find the index of the atom in the template.
+ Get atomic number of the atom with the index in the template and build it into the residue.
+
+ Args:
+ template(dict): Template for residue.
+ """
atom_index = get_template_index(template, self.atom_name)
self.atomic_number = Tensor(self._get_atomic_number(template, atom_index), ms.int32)
return self
def build_atom_type(self, template: dict):
- """build atom type"""
+ """
+ According to the name of the atom, find the index of the atom in the template.
+ Get atom type of the atom with the index in the template and build it into the residue.
+
+ Args:
+ template(dict): Template for residue.
+ """
atom_index = get_template_index(template, self.atom_name)
self.atom_type = self._get_atom_type(template, atom_index)
return self
def build_atom_charge(self, template: dict):
- """build atom type"""
+ """
+ According to the name of the atom, find the index of the atom in the template.
+ Get atom charge of the atom with the index in the template and build it into the residue.
+
+ Args:
+ template(dict): Template for residue.
+ """
atom_index = get_template_index(template, self.atom_name)
self.atom_charge = Tensor(self._get_atom_charge(template, atom_index), ms.float32)
return self
def build_bond(self, template: dict):
- """build bond"""
+ """
+ According to the name of the atom, find the index of the atom in the template.
+ Get bond of the atom with the index in the template and build it into the residue.
+
+ Args:
+ template(dict): Template for residue.
+ """
atom_index = get_template_index(template, self.atom_name)
self.bond = Tensor(self._get_bond(template, atom_index), ms.int32)
return self
@@ -398,7 +420,16 @@ class Residue:
atom_charge: float = None,
atomic_number: str = None,
):
- """set atom"""
+ """
+ Add an atom to the residue.
+
+ Args:
+ atom_name(str): Atom name. Default: None
+ atom_type(str): Atom type. Default: None
+ atom_mass(float): Atom mass. Default: None
+ atom_charge(float): Atom charge. Default: None
+ atomic_number(str): Atomic number. Default: None
+ """
if atom_name is None and atomic_number is None:
raise ValueError('atom_name and atomic_number cannot both be None')
@@ -484,7 +515,12 @@ class Residue:
return self
def broadcast_multiplicity(self, multi_system: int):
- """broadcast the information to the number of multiple system"""
+ """
+ Broadcast the information to the number of multiple system.
+
+ Args:
+ multi_system(int): The number of multiple systems.
+ """
if multi_system <= 0:
raise ValueError('multi_system must be larger than 0!')
if self.multi_system > 1:
@@ -516,12 +552,22 @@ class Residue:
return self
def set_name(self, name: str):
- """set residue name"""
+ """
+ Set residue name.
+
+ Args:
+ name(str): Residue name.
+ """
self._name = name
return self
def set_start_index(self, start_index: int):
- """set the start index"""
+ """
+ Set the start index.
+
+ Args:
+ start_index(int): The start index.
+ """
if start_index < 0:
raise ValueError('The start_index cannot be smaller than 0!')
self.start_index = get_integer(start_index)
diff --git a/MindSPONGE/requirements.txt b/MindSPONGE/requirements.txt
index d708a293cc2136c512552f8f9f04d4600a5f075f..e7c4fa7f3a929d9da16b6feec4603bad84057a4d 100644
--- a/MindSPONGE/requirements.txt
+++ b/MindSPONGE/requirements.txt
@@ -7,3 +7,6 @@ glob2 >= 0.6
h5py >= 3.6.0
absl-py >= 1.1.0
biotite >= 0.35.0
+descriptastorus >= 2.6.0
+rdkit
+mindspore-gl
\ No newline at end of file
diff --git a/MindSPONGE/tutorials/advanced/alad.pdb b/MindSPONGE/tutorials/advanced/alad.pdb
new file mode 100644
index 0000000000000000000000000000000000000000..af722c9a0bdf4b21ace3a0427ede13d70e7c3f85
--- /dev/null
+++ b/MindSPONGE/tutorials/advanced/alad.pdb
@@ -0,0 +1,24 @@
+ATOM 1 H1 ACE 1 2.000 1.000 -0.000 1.00 0.00
+ATOM 2 CH3 ACE 1 2.000 2.090 0.000 1.00 0.00
+ATOM 3 H2 ACE 1 1.486 2.454 0.890 1.00 0.00
+ATOM 4 H3 ACE 1 1.486 2.454 -0.890 1.00 0.00
+ATOM 5 C ACE 1 3.427 2.641 -0.000 1.00 0.00
+ATOM 6 O ACE 1 4.391 1.877 -0.000 1.00 0.00
+ATOM 7 N ALA 2 3.555 3.970 -0.000 1.00 0.00
+ATOM 8 H ALA 2 2.733 4.556 -0.000 1.00 0.00
+ATOM 9 CA ALA 2 4.853 4.614 -0.000 1.00 0.00
+ATOM 10 HA ALA 2 5.408 4.316 0.890 1.00 0.00
+ATOM 11 CB ALA 2 5.661 4.221 -1.232 1.00 0.00
+ATOM 12 HB1 ALA 2 5.123 4.521 -2.131 1.00 0.00
+ATOM 13 HB2 ALA 2 6.630 4.719 -1.206 1.00 0.00
+ATOM 14 HB3 ALA 2 5.809 3.141 -1.241 1.00 0.00
+ATOM 15 C ALA 2 4.713 6.129 0.000 1.00 0.00
+ATOM 16 O ALA 2 3.601 6.653 0.000 1.00 0.00
+ATOM 17 N NME 3 5.846 6.835 0.000 1.00 0.00
+ATOM 18 H NME 3 6.737 6.359 -0.000 1.00 0.00
+ATOM 19 CH3 NME 3 5.846 8.284 0.000 1.00 0.00
+ATOM 20 HH31 NME 3 4.819 8.648 0.000 1.00 0.00
+ATOM 21 HH32 NME 3 6.360 8.648 0.890 1.00 0.00
+ATOM 22 HH33 NME 3 6.360 8.648 -0.890 1.00 0.00
+TER
+END
diff --git a/OWNERS b/OWNERS
index 78f597971694ccd47c3d88169c1e7f4f32178879..d8ceebed22236388e659475a2d0c8c1507402138 100644
--- a/OWNERS
+++ b/OWNERS
@@ -7,6 +7,7 @@ approvers:
- chuht
- kangyangzc
- ljl0711
+- yi-zhang95
reviewers:
- wang_zi_dong
@@ -17,3 +18,4 @@ reviewers:
- chuht
- kangyangzc
- ljl0711
+- yi-zhang95
diff --git a/README_CN.md b/README_CN.md
index 9fa8c6bc6c01765a08324878fd8bd5ed6f29fdfe..b2c78d4ea6c68e045b18091d61b809cdcc8a12cb 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -1,13 +1,101 @@
-# MindScience
-
-[View English](README.md)
-
-## 概述
-
-MindScience是基于MindSpore融合架构打造的科学计算行业套件,包含了业界领先的数据集、基础模型、预置高精度模型和前后处理工具,加速了科学行业应用开发。目前已推出面向电子信息行业的MindSpore Elec套件和面向生命科学行业的MindSPONGE套件,分别实现了电磁仿真性能提升10倍和生物制药化合物模拟效率提升50%。
-
-## 架构图
-
-